ngram
listlengths
0
67.8k
[ "= 20 def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db): platform_chrs, snp_count_per_chr, snp_chr_indexes =", "snp_id, genos, xs, ys samples = [] for sample_name in sample_names: chr_dict =", "curr_sample in samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:', ', '.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE def", "{ 'xs': [float('nan')] * curr_snp_count, 'ys': [float('nan')] * curr_snp_count, 'snps': ['-'] * curr_snp_count,", "matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated Y intensity values matrix') args = parser.parse_args() import_samples( args.platform,", "for x in slice] def get_data(data_row): # the '+ 1' is because we", "geno_matrix_handle, \\ open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \\ open(y_matrix_csv, 'r', newline='') as y_matrix_handle:", "open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \\ open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \\ open(y_matrix_csv,", "'platform_id': platform, 'chromosome_data': chr_dict, 'tags': sample_tags, 'unannotated_snps': [], } samples.append(curr_sample) for snp_id, genos,", "data we are importing. 
eg: MegaMUGA') parser.add_argument( 'tag', help='a tag name that should", "name that should be associated with all imported samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype", "= '-' curr_x = xs[i] curr_y = ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] =", "in platform_chrs: curr_snp_count = snp_count_per_chr[chr] chr_dict[chr] = { 'xs': [float('nan')] * curr_snp_count, 'ys':", "# grab the current sample names geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table", "parser.parse_args() import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv, [args.tag, args.platform], mds.init_db()) if __name__ == '__main__':", "[] for sample_name in sample_names: chr_dict = dict() for chr in platform_chrs: curr_snp_count", "db) curr_sample_start_index = 0 while True: def get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE]", "all imported samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype values matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated X", "slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return [x.strip() for x in slice] def get_data(data_row):", "csv.reader(x_matrix_handle) y_matrix_table = csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table)) if not sample_names: # we've already", "xs = [float(x) for x in get_data(next_x_row)] ys = [float(y) for y in", "eg: MegaMUGA') parser.add_argument( 'tag', help='a tag name that should be associated with all", "because we need to shift right to accommodate the SNP ID column slice", "sample_names: # we've already imported all of the samples return x_sample_names = get_sample_names(next(x_matrix_table))", "import csv import haploqa.mongods as mds SAMPLE_BATCH_SIZE = 20 def 
import_samples(platform, geno_matrix_csv, x_matrix_csv,", "[x.strip() for x in slice] def get_data(data_row): # the '+ 1' is because", "[float('nan')] * curr_snp_count, 'snps': ['-'] * curr_snp_count, } curr_sample = { 'sample_id': mds.gen_unique_id(db),", "curr_sample = { 'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name], 'platform_id': platform, 'chromosome_data': chr_dict, 'tags': sample_tags,", "snp_chr_index['chromosome'] snp_index = snp_chr_index['index'] for i, curr_sample in enumerate(samples): curr_geno = genos[i].upper() if", "for i, curr_sample in enumerate(samples): curr_geno = genos[i].upper() if curr_geno == 'N': curr_geno", "we need to shift right to accommodate the SNP ID column slice =", "report with probe intensities') parser.add_argument( 'platform', help='the platform for the data we are", "curr_snp_count, 'ys': [float('nan')] * curr_snp_count, 'snps': ['-'] * curr_snp_count, } curr_sample = {", "newline='') as y_matrix_handle: # grab the current sample names geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table", "ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index] = curr_geno", "curr_sample_start_index += SAMPLE_BATCH_SIZE def main(): # parse command line arguments parser = argparse.ArgumentParser(description='import", "curr_y curr_sample_chr['snps'][snp_index] = curr_geno for curr_sample in samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:', ',", "x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table)) if sample_names != x_sample_names or sample_names !=", "mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:', ', '.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE def main(): # parse", 
"header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return [x.strip() for x in slice] def get_data(data_row): # the", "curr_sample_start_index = 0 while True: def get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return", "snp_id != next_x_row[0].strip() or snp_id != next_y_row[0].strip(): raise Exception('snp IDs do not match", "', '.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE def main(): # parse command line arguments parser", "ys samples = [] for sample_name in sample_names: chr_dict = dict() for chr", "'x_matrix_csv', help='comma-separated X intensity values matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated Y intensity values matrix')", "if not sample_names: # we've already imported all of the samples return x_sample_names", "[], } samples.append(curr_sample) for snp_id, genos, xs, ys in make_snp_stream(): snp_chr_index = snp_chr_indexes.get(snp_id)", "SAMPLE_BATCH_SIZE def main(): # parse command line arguments parser = argparse.ArgumentParser(description='import the final", "curr_x = xs[i] curr_y = ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index]", "with probe intensities') parser.add_argument( 'platform', help='the platform for the data we are importing.", "snp_id != next_y_row[0].strip(): raise Exception('snp IDs do not match in files') genos =", "xs, ys samples = [] for sample_name in sample_names: chr_dict = dict() for", "intensities') parser.add_argument( 'platform', help='the platform for the data we are importing. 
eg: MegaMUGA')", "next_y_row = next(y_matrix_table) snp_id = next_geno_row[0].strip() if snp_id != next_x_row[0].strip() or snp_id !=", "snp_chr_indexes.get(snp_id) if snp_chr_index is not None: snp_chr = snp_chr_index['chromosome'] snp_index = snp_chr_index['index'] for", "snp_chr = snp_chr_index['chromosome'] snp_index = snp_chr_index['index'] for i, curr_sample in enumerate(samples): curr_geno =", "curr_geno = genos[i].upper() if curr_geno == 'N': curr_geno = '-' curr_x = xs[i]", "as geno_matrix_handle, \\ open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \\ open(y_matrix_csv, 'r', newline='') as", "sample_names: chr_dict = dict() for chr in platform_chrs: curr_snp_count = snp_count_per_chr[chr] chr_dict[chr] =", "genotype values matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated X intensity values matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated", "x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table = csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table)) if not sample_names: #", "= [] for sample_name in sample_names: chr_dict = dict() for chr in platform_chrs:", "= ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index] =", "next(x_matrix_table) next_y_row = next(y_matrix_table) snp_id = next_geno_row[0].strip() if snp_id != next_x_row[0].strip() or snp_id", "that should be associated with all imported samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype values", "} samples.append(curr_sample) for snp_id, genos, xs, ys in make_snp_stream(): snp_chr_index = snp_chr_indexes.get(snp_id) if", "'+ 1' is because we need to shift right to accommodate the SNP", "next_geno_row = next(geno_matrix_table) next_x_row = next(x_matrix_table) next_y_row = next(y_matrix_table) snp_id = next_geno_row[0].strip() if", "xs, ys in make_snp_stream(): 
snp_chr_index = snp_chr_indexes.get(snp_id) if snp_chr_index is not None: snp_chr", "def make_snp_stream(): while True: next_geno_row = next(geno_matrix_table) next_x_row = next(x_matrix_table) next_y_row = next(y_matrix_table)", "[float(y) for y in get_data(next_y_row)] yield snp_id, genos, xs, ys samples = []", "for x in get_data(next_x_row)] ys = [float(y) for y in get_data(next_y_row)] yield snp_id,", "of the samples return x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table)) if sample_names !=", "\\ open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \\ open(y_matrix_csv, 'r', newline='') as y_matrix_handle: #", "sample_tags, 'unannotated_snps': [], } samples.append(curr_sample) for snp_id, genos, xs, ys in make_snp_stream(): snp_chr_index", "= get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table)) if sample_names != x_sample_names or sample_names != y_sample_names:", "y_sample_names = get_sample_names(next(y_matrix_table)) if sample_names != x_sample_names or sample_names != y_sample_names: raise Exception('sample", "'.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE def main(): # parse command line arguments parser =", "get_data(next_geno_row) xs = [float(x) for x in get_data(next_x_row)] ys = [float(y) for y", "x in slice] with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \\ open(x_matrix_csv, 'r', newline='')", "get_data(next_x_row)] ys = [float(y) for y in get_data(next_y_row)] yield snp_id, genos, xs, ys", "= csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table)) if not sample_names: # we've already imported all", "files') genos = get_data(next_geno_row) xs = [float(x) for x in get_data(next_x_row)] ys =", "matrix') args = parser.parse_args() import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv, [args.tag, args.platform], mds.init_db()) if", "def 
get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return [x.strip() for x in slice]", "slice] def get_data(data_row): # the '+ 1' is because we need to shift", "while True: next_geno_row = next(geno_matrix_table) next_x_row = next(x_matrix_table) next_y_row = next(y_matrix_table) snp_id =", "return [x.strip() for x in slice] def get_data(data_row): # the '+ 1' is", "as y_matrix_handle: # grab the current sample names geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table =", "+ 1] return [x.strip() for x in slice] with open(geno_matrix_csv, 'r', newline='') as", "for sample_name in sample_names: chr_dict = dict() for chr in platform_chrs: curr_snp_count =", "in get_data(next_y_row)] yield snp_id, genos, xs, ys samples = [] for sample_name in", "snp_id, genos, xs, ys in make_snp_stream(): snp_chr_index = snp_chr_indexes.get(snp_id) if snp_chr_index is not", "do not match in files') genos = get_data(next_geno_row) xs = [float(x) for x", "sample_names != y_sample_names: raise Exception('sample IDs do not match in files') def make_snp_stream():", "* curr_snp_count, 'snps': ['-'] * curr_snp_count, } curr_sample = { 'sample_id': mds.gen_unique_id(db), 'other_ids':", "next_y_row[0].strip(): raise Exception('snp IDs do not match in files') genos = get_data(next_geno_row) xs", "= next_geno_row[0].strip() if snp_id != next_x_row[0].strip() or snp_id != next_y_row[0].strip(): raise Exception('snp IDs", "not match in files') def make_snp_stream(): while True: next_geno_row = next(geno_matrix_table) next_x_row =", "'tags': sample_tags, 'unannotated_snps': [], } samples.append(curr_sample) for snp_id, genos, xs, ys in make_snp_stream():", "get_sample_names(next(y_matrix_table)) if sample_names != x_sample_names or sample_names != y_sample_names: raise Exception('sample IDs do", "names geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table = 
csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table))", "in slice] def get_data(data_row): # the '+ 1' is because we need to", "= dict() for chr in platform_chrs: curr_snp_count = snp_count_per_chr[chr] chr_dict[chr] = { 'xs':", "import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db): platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db) curr_sample_start_index", "next(y_matrix_table) snp_id = next_geno_row[0].strip() if snp_id != next_x_row[0].strip() or snp_id != next_y_row[0].strip(): raise", "'-' curr_x = xs[i] curr_y = ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x", "next_x_row = next(x_matrix_table) next_y_row = next(y_matrix_table) snp_id = next_geno_row[0].strip() if snp_id != next_x_row[0].strip()", "'other_ids': [sample_name], 'platform_id': platform, 'chromosome_data': chr_dict, 'tags': sample_tags, 'unannotated_snps': [], } samples.append(curr_sample) for", "mds.within_chr_snp_indices(platform, db) curr_sample_start_index = 0 while True: def get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index +", "for the data we are importing. 
eg: MegaMUGA') parser.add_argument( 'tag', help='a tag name", "csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table = csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table)) if not sample_names:", "yield snp_id, genos, xs, ys samples = [] for sample_name in sample_names: chr_dict", "for curr_sample in samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:', ', '.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE", "snp_chr_index['index'] for i, curr_sample in enumerate(samples): curr_geno = genos[i].upper() if curr_geno == 'N':", "platform_chrs: curr_snp_count = snp_count_per_chr[chr] chr_dict[chr] = { 'xs': [float('nan')] * curr_snp_count, 'ys': [float('nan')]", "# we've already imported all of the samples return x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names", "curr_sample_chr['snps'][snp_index] = curr_geno for curr_sample in samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:', ', '.join(sample_names))", "match in files') def make_snp_stream(): while True: next_geno_row = next(geno_matrix_table) next_x_row = next(x_matrix_table)", "'snps': ['-'] * curr_snp_count, } curr_sample = { 'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name], 'platform_id':", "data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1] return [x.strip() for x in slice]", "!= next_x_row[0].strip() or snp_id != next_y_row[0].strip(): raise Exception('snp IDs do not match in", "get_sample_names(next(geno_matrix_table)) if not sample_names: # we've already imported all of the samples return", "+ SAMPLE_BATCH_SIZE] return [x.strip() for x in slice] def get_data(data_row): # the '+", "if curr_geno == 'N': curr_geno = '-' curr_x = xs[i] curr_y = ys[i]", "enumerate(samples): curr_geno = genos[i].upper() if curr_geno == 'N': curr_geno = '-' curr_x =", "genos, 
xs, ys samples = [] for sample_name in sample_names: chr_dict = dict()", "= mds.within_chr_snp_indices(platform, db) curr_sample_start_index = 0 while True: def get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index", "for y in get_data(next_y_row)] yield snp_id, genos, xs, ys samples = [] for", "y_sample_names: raise Exception('sample IDs do not match in files') def make_snp_stream(): while True:", "geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table = csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table)) if", "= snp_chr_index['chromosome'] snp_index = snp_chr_index['index'] for i, curr_sample in enumerate(samples): curr_geno = genos[i].upper()", "# parse command line arguments parser = argparse.ArgumentParser(description='import the final report with probe", "imported all of the samples return x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table)) if", "snp_index = snp_chr_index['index'] for i, curr_sample in enumerate(samples): curr_geno = genos[i].upper() if curr_geno", "= csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table = csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table)) if not", "samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:', ', '.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE def main(): #", "accommodate the SNP ID column slice = data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE +", "x_matrix_csv, y_matrix_csv, sample_tags, db): platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db) curr_sample_start_index = 0", "in sample_names: chr_dict = dict() for chr in platform_chrs: curr_snp_count = snp_count_per_chr[chr] chr_dict[chr]", "are importing. 
eg: MegaMUGA') parser.add_argument( 'tag', help='a tag name that should be associated", "'geno_matrix_csv', help='comma-separated genotype values matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated X intensity values matrix') parser.add_argument(", "help='comma-separated X intensity values matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated Y intensity values matrix') args", "probe intensities') parser.add_argument( 'platform', help='the platform for the data we are importing. eg:", "help='a tag name that should be associated with all imported samples') parser.add_argument( 'geno_matrix_csv',", "parser = argparse.ArgumentParser(description='import the final report with probe intensities') parser.add_argument( 'platform', help='the platform", "platform for the data we are importing. eg: MegaMUGA') parser.add_argument( 'tag', help='a tag", "X intensity values matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated Y intensity values matrix') args =", "[float('nan')] * curr_snp_count, 'ys': [float('nan')] * curr_snp_count, 'snps': ['-'] * curr_snp_count, } curr_sample", "importing. 
eg: MegaMUGA') parser.add_argument( 'tag', help='a tag name that should be associated with", "!= next_y_row[0].strip(): raise Exception('snp IDs do not match in files') genos = get_data(next_geno_row)", "curr_snp_count, 'snps': ['-'] * curr_snp_count, } curr_sample = { 'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name],", "chr_dict, 'tags': sample_tags, 'unannotated_snps': [], } samples.append(curr_sample) for snp_id, genos, xs, ys in", "parser.add_argument( 'tag', help='a tag name that should be associated with all imported samples')", "curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index] = curr_geno for curr_sample in", "= next(x_matrix_table) next_y_row = next(y_matrix_table) snp_id = next_geno_row[0].strip() if snp_id != next_x_row[0].strip() or", "the SNP ID column slice = data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1]", "{ 'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name], 'platform_id': platform, 'chromosome_data': chr_dict, 'tags': sample_tags, 'unannotated_snps': [],", "= next(y_matrix_table) snp_id = next_geno_row[0].strip() if snp_id != next_x_row[0].strip() or snp_id != next_y_row[0].strip():", "= snp_chr_indexes.get(snp_id) if snp_chr_index is not None: snp_chr = snp_chr_index['chromosome'] snp_index = snp_chr_index['index']", "\\ open(y_matrix_csv, 'r', newline='') as y_matrix_handle: # grab the current sample names geno_matrix_table", "True: next_geno_row = next(geno_matrix_table) next_x_row = next(x_matrix_table) next_y_row = next(y_matrix_table) snp_id = next_geno_row[0].strip()", "= [float(y) for y in get_data(next_y_row)] yield snp_id, genos, xs, ys samples =", "return [x.strip() for x in slice] with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \\", "do not match in files') def make_snp_stream(): while True: next_geno_row = next(geno_matrix_table) next_x_row", 
"as mds SAMPLE_BATCH_SIZE = 20 def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db): platform_chrs,", "20 def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db): platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform,", "open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \\ open(y_matrix_csv, 'r', newline='') as y_matrix_handle: # grab", "sample_names != x_sample_names or sample_names != y_sample_names: raise Exception('sample IDs do not match", "genos, xs, ys in make_snp_stream(): snp_chr_index = snp_chr_indexes.get(snp_id) if snp_chr_index is not None:", "= get_sample_names(next(geno_matrix_table)) if not sample_names: # we've already imported all of the samples", "if snp_id != next_x_row[0].strip() or snp_id != next_y_row[0].strip(): raise Exception('snp IDs do not", "is because we need to shift right to accommodate the SNP ID column", "get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table)) if sample_names != x_sample_names or sample_names != y_sample_names: raise", "tag name that should be associated with all imported samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated", "parser.add_argument( 'y_matrix_csv', help='comma-separated Y intensity values matrix') args = parser.parse_args() import_samples( args.platform, args.geno_matrix_csv,", "newline='') as geno_matrix_handle, \\ open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \\ open(y_matrix_csv, 'r', newline='')", "slice] with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \\ open(x_matrix_csv, 'r', newline='') as x_matrix_handle,", "curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index] = curr_geno for curr_sample in samples:", "parser.add_argument( 'platform', help='the platform for the data we are importing. 
eg: MegaMUGA') parser.add_argument(", "= snp_count_per_chr[chr] chr_dict[chr] = { 'xs': [float('nan')] * curr_snp_count, 'ys': [float('nan')] * curr_snp_count,", "in files') def make_snp_stream(): while True: next_geno_row = next(geno_matrix_table) next_x_row = next(x_matrix_table) next_y_row", "match in files') genos = get_data(next_geno_row) xs = [float(x) for x in get_data(next_x_row)]", "'N': curr_geno = '-' curr_x = xs[i] curr_y = ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr]", "snp_chr_index is not None: snp_chr = snp_chr_index['chromosome'] snp_index = snp_chr_index['index'] for i, curr_sample", "curr_snp_count, } curr_sample = { 'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name], 'platform_id': platform, 'chromosome_data': chr_dict,", "samples:', ', '.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE def main(): # parse command line arguments", "curr_sample in enumerate(samples): curr_geno = genos[i].upper() if curr_geno == 'N': curr_geno = '-'", "files') def make_snp_stream(): while True: next_geno_row = next(geno_matrix_table) next_x_row = next(x_matrix_table) next_y_row =", "i, curr_sample in enumerate(samples): curr_geno = genos[i].upper() if curr_geno == 'N': curr_geno =", "= csv.reader(x_matrix_handle) y_matrix_table = csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table)) if not sample_names: # we've", "= { 'xs': [float('nan')] * curr_snp_count, 'ys': [float('nan')] * curr_snp_count, 'snps': ['-'] *", "chr_dict = dict() for chr in platform_chrs: curr_snp_count = snp_count_per_chr[chr] chr_dict[chr] = {", "not None: snp_chr = snp_chr_index['chromosome'] snp_index = snp_chr_index['index'] for i, curr_sample in enumerate(samples):", "intensity values matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated Y intensity values matrix') args = parser.parse_args()", "= genos[i].upper() if curr_geno == 'N': curr_geno = '-' curr_x = xs[i] curr_y", "def 
import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db): platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db)", "the final report with probe intensities') parser.add_argument( 'platform', help='the platform for the data", "curr_snp_count = snp_count_per_chr[chr] chr_dict[chr] = { 'xs': [float('nan')] * curr_snp_count, 'ys': [float('nan')] *", "the data we are importing. eg: MegaMUGA') parser.add_argument( 'tag', help='a tag name that", "the samples return x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table)) if sample_names != x_sample_names", "= curr_y curr_sample_chr['snps'][snp_index] = curr_geno for curr_sample in samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:',", "be associated with all imported samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype values matrix') parser.add_argument(", "right to accommodate the SNP ID column slice = data_row[curr_sample_start_index + 1:curr_sample_start_index +", "sample names geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table = csv.reader(y_matrix_handle) sample_names =", "matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated X intensity values matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated Y intensity", "if sample_names != x_sample_names or sample_names != y_sample_names: raise Exception('sample IDs do not", "IDs do not match in files') def make_snp_stream(): while True: next_geno_row = next(geno_matrix_table)", "'chromosome_data': chr_dict, 'tags': sample_tags, 'unannotated_snps': [], } samples.append(curr_sample) for snp_id, genos, xs, ys", "= parser.parse_args() import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv, [args.tag, args.platform], mds.init_db()) if __name__ ==", "to shift 
right to accommodate the SNP ID column slice = data_row[curr_sample_start_index +", "+ 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1] return [x.strip() for x in slice] with", "return x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table)) if sample_names != x_sample_names or sample_names", "get_data(next_y_row)] yield snp_id, genos, xs, ys samples = [] for sample_name in sample_names:", "if snp_chr_index is not None: snp_chr = snp_chr_index['chromosome'] snp_index = snp_chr_index['index'] for i,", "= argparse.ArgumentParser(description='import the final report with probe intensities') parser.add_argument( 'platform', help='the platform for", "curr_geno for curr_sample in samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:', ', '.join(sample_names)) curr_sample_start_index +=", "'platform', help='the platform for the data we are importing. eg: MegaMUGA') parser.add_argument( 'tag',", "'r', newline='') as geno_matrix_handle, \\ open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \\ open(y_matrix_csv, 'r',", "* curr_snp_count, } curr_sample = { 'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name], 'platform_id': platform, 'chromosome_data':", "raise Exception('snp IDs do not match in files') genos = get_data(next_geno_row) xs =", "!= x_sample_names or sample_names != y_sample_names: raise Exception('sample IDs do not match in", "snp_id = next_geno_row[0].strip() if snp_id != next_x_row[0].strip() or snp_id != next_y_row[0].strip(): raise Exception('snp", "= curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index] = curr_geno for curr_sample", "= curr_geno for curr_sample in samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:', ', '.join(sample_names)) curr_sample_start_index", "SAMPLE_BATCH_SIZE 
+ 1] return [x.strip() for x in slice] with open(geno_matrix_csv, 'r', newline='')", "help='the platform for the data we are importing. eg: MegaMUGA') parser.add_argument( 'tag', help='a", "newline='') as x_matrix_handle, \\ open(y_matrix_csv, 'r', newline='') as y_matrix_handle: # grab the current", "MegaMUGA') parser.add_argument( 'tag', help='a tag name that should be associated with all imported", "[float(x) for x in get_data(next_x_row)] ys = [float(y) for y in get_data(next_y_row)] yield", "parse command line arguments parser = argparse.ArgumentParser(description='import the final report with probe intensities')", "'r', newline='') as x_matrix_handle, \\ open(y_matrix_csv, 'r', newline='') as y_matrix_handle: # grab the", "y_matrix_handle: # grab the current sample names geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle)", "already imported all of the samples return x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table))", "import argparse import csv import haploqa.mongods as mds SAMPLE_BATCH_SIZE = 20 def import_samples(platform,", "'unannotated_snps': [], } samples.append(curr_sample) for snp_id, genos, xs, ys in make_snp_stream(): snp_chr_index =", "x in get_data(next_x_row)] ys = [float(y) for y in get_data(next_y_row)] yield snp_id, genos,", "chr_dict[chr] = { 'xs': [float('nan')] * curr_snp_count, 'ys': [float('nan')] * curr_snp_count, 'snps': ['-']", "curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index] = curr_geno for", "args = parser.parse_args() import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv, [args.tag, args.platform], mds.init_db()) if __name__", "mds SAMPLE_BATCH_SIZE = 20 def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db): platform_chrs, 
snp_count_per_chr,", "grab the current sample names geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table =", "= header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return [x.strip() for x in slice] def get_data(data_row): #", "next(geno_matrix_table) next_x_row = next(x_matrix_table) next_y_row = next(y_matrix_table) snp_id = next_geno_row[0].strip() if snp_id !=", "1' is because we need to shift right to accommodate the SNP ID", "or sample_names != y_sample_names: raise Exception('sample IDs do not match in files') def", "shift right to accommodate the SNP ID column slice = data_row[curr_sample_start_index + 1:curr_sample_start_index", "for chr in platform_chrs: curr_snp_count = snp_count_per_chr[chr] chr_dict[chr] = { 'xs': [float('nan')] *", "arguments parser = argparse.ArgumentParser(description='import the final report with probe intensities') parser.add_argument( 'platform', help='the", "with all imported samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype values matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated", "imported samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype values matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated X intensity", "samples = [] for sample_name in sample_names: chr_dict = dict() for chr in", "Y intensity values matrix') args = parser.parse_args() import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv, [args.tag,", "y_matrix_csv, sample_tags, db): platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db) curr_sample_start_index = 0 while", "haploqa.mongods as mds SAMPLE_BATCH_SIZE = 20 def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db):", "curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index] = curr_geno for curr_sample in 
samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted", "current sample names geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table = csv.reader(y_matrix_handle) sample_names", "make_snp_stream(): snp_chr_index = snp_chr_indexes.get(snp_id) if snp_chr_index is not None: snp_chr = snp_chr_index['chromosome'] snp_index", "None: snp_chr = snp_chr_index['chromosome'] snp_index = snp_chr_index['index'] for i, curr_sample in enumerate(samples): curr_geno", "samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype values matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated X intensity values", "'xs': [float('nan')] * curr_snp_count, 'ys': [float('nan')] * curr_snp_count, 'snps': ['-'] * curr_snp_count, }", "help='comma-separated genotype values matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated X intensity values matrix') parser.add_argument( 'y_matrix_csv',", "values matrix') args = parser.parse_args() import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv, [args.tag, args.platform], mds.init_db())", "argparse import csv import haploqa.mongods as mds SAMPLE_BATCH_SIZE = 20 def import_samples(platform, geno_matrix_csv,", "snp_chr_index = snp_chr_indexes.get(snp_id) if snp_chr_index is not None: snp_chr = snp_chr_index['chromosome'] snp_index =", "not sample_names: # we've already imported all of the samples return x_sample_names =", "chr in platform_chrs: curr_snp_count = snp_count_per_chr[chr] chr_dict[chr] = { 'xs': [float('nan')] * curr_snp_count,", "argparse.ArgumentParser(description='import the final report with probe intensities') parser.add_argument( 'platform', help='the platform for the", "db): platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db) curr_sample_start_index = 0 while True: def", "SNP ID column slice = 
data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1] return", "== 'N': curr_geno = '-' curr_x = xs[i] curr_y = ys[i] curr_sample_chr =", "# the '+ 1' is because we need to shift right to accommodate", "'r', newline='') as y_matrix_handle: # grab the current sample names geno_matrix_table = csv.reader(geno_matrix_handle)", "ys in make_snp_stream(): snp_chr_index = snp_chr_indexes.get(snp_id) if snp_chr_index is not None: snp_chr =", "* curr_snp_count, 'ys': [float('nan')] * curr_snp_count, 'snps': ['-'] * curr_snp_count, } curr_sample =", "'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name], 'platform_id': platform, 'chromosome_data': chr_dict, 'tags': sample_tags, 'unannotated_snps': [], }", "associated with all imported samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype values matrix') parser.add_argument( 'x_matrix_csv',", "in slice] with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \\ open(x_matrix_csv, 'r', newline='') as", "x in slice] def get_data(data_row): # the '+ 1' is because we need", "samples.append(curr_sample) for snp_id, genos, xs, ys in make_snp_stream(): snp_chr_index = snp_chr_indexes.get(snp_id) if snp_chr_index", "the '+ 1' is because we need to shift right to accommodate the", "import haploqa.mongods as mds SAMPLE_BATCH_SIZE = 20 def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags,", "= { 'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name], 'platform_id': platform, 'chromosome_data': chr_dict, 'tags': sample_tags, 'unannotated_snps':", "final report with probe intensities') parser.add_argument( 'platform', help='the platform for the data we", "y_matrix_table = csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table)) if not sample_names: # we've already imported", "in samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample) print('inserted samples:', ', 
'.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE def main():", "csv.reader(y_matrix_handle) sample_names = get_sample_names(next(geno_matrix_table)) if not sample_names: # we've already imported all of", "main(): # parse command line arguments parser = argparse.ArgumentParser(description='import the final report with", "'ys': [float('nan')] * curr_snp_count, 'snps': ['-'] * curr_snp_count, } curr_sample = { 'sample_id':", "csv import haploqa.mongods as mds SAMPLE_BATCH_SIZE = 20 def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv,", "column slice = data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1] return [x.strip() for", "platform, 'chromosome_data': chr_dict, 'tags': sample_tags, 'unannotated_snps': [], } samples.append(curr_sample) for snp_id, genos, xs,", "need to shift right to accommodate the SNP ID column slice = data_row[curr_sample_start_index", "the current sample names geno_matrix_table = csv.reader(geno_matrix_handle) x_matrix_table = csv.reader(x_matrix_handle) y_matrix_table = csv.reader(y_matrix_handle)", "x_matrix_handle, \\ open(y_matrix_csv, 'r', newline='') as y_matrix_handle: # grab the current sample names", "genos = get_data(next_geno_row) xs = [float(x) for x in get_data(next_x_row)] ys = [float(y)", "= curr_x curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index] = curr_geno for curr_sample in samples: mds.post_proc_sample(curr_sample)", "open(y_matrix_csv, 'r', newline='') as y_matrix_handle: # grab the current sample names geno_matrix_table =", "in get_data(next_x_row)] ys = [float(y) for y in get_data(next_y_row)] yield snp_id, genos, xs,", "= 0 while True: def get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return [x.strip()", "db.samples.insert_one(curr_sample) print('inserted samples:', ', '.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE def main(): # 
parse command", "x_sample_names or sample_names != y_sample_names: raise Exception('sample IDs do not match in files')", "get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return [x.strip() for x in slice] def", "platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db) curr_sample_start_index = 0 while True: def get_sample_names(header_row):", "geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db): platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db) curr_sample_start_index =", "curr_geno == 'N': curr_geno = '-' curr_x = xs[i] curr_y = ys[i] curr_sample_chr", "= next(geno_matrix_table) next_x_row = next(x_matrix_table) next_y_row = next(y_matrix_table) snp_id = next_geno_row[0].strip() if snp_id", "all of the samples return x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table)) if sample_names", "parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype values matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated X intensity values matrix')", "values matrix') parser.add_argument( 'x_matrix_csv', help='comma-separated X intensity values matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated Y", "'tag', help='a tag name that should be associated with all imported samples') parser.add_argument(", "samples return x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names = get_sample_names(next(y_matrix_table)) if sample_names != x_sample_names or", "sample_names = get_sample_names(next(geno_matrix_table)) if not sample_names: # we've already imported all of the", "= get_sample_names(next(y_matrix_table)) if sample_names != x_sample_names or sample_names != y_sample_names: raise Exception('sample IDs", "as x_matrix_handle, \\ open(y_matrix_csv, 'r', newline='') as y_matrix_handle: # grab the current sample", 
"command line arguments parser = argparse.ArgumentParser(description='import the final report with probe intensities') parser.add_argument(", "0 while True: def get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return [x.strip() for", "with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \\ open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \\", "} curr_sample = { 'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name], 'platform_id': platform, 'chromosome_data': chr_dict, 'tags':", "next_x_row[0].strip() or snp_id != next_y_row[0].strip(): raise Exception('snp IDs do not match in files')", "IDs do not match in files') genos = get_data(next_geno_row) xs = [float(x) for", "parser.add_argument( 'x_matrix_csv', help='comma-separated X intensity values matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated Y intensity values", "while True: def get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return [x.strip() for x", "def get_data(data_row): # the '+ 1' is because we need to shift right", "ys = [float(y) for y in get_data(next_y_row)] yield snp_id, genos, xs, ys samples", "raise Exception('sample IDs do not match in files') def make_snp_stream(): while True: next_geno_row", "= get_data(next_geno_row) xs = [float(x) for x in get_data(next_x_row)] ys = [float(y) for", "snp_count_per_chr[chr] chr_dict[chr] = { 'xs': [float('nan')] * curr_snp_count, 'ys': [float('nan')] * curr_snp_count, 'snps':", "= data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1] return [x.strip() for x in", "next_geno_row[0].strip() if snp_id != next_x_row[0].strip() or snp_id != next_y_row[0].strip(): raise Exception('snp IDs do", "def main(): # parse command line arguments parser = argparse.ArgumentParser(description='import the final report", "mds.gen_unique_id(db), 'other_ids': 
[sample_name], 'platform_id': platform, 'chromosome_data': chr_dict, 'tags': sample_tags, 'unannotated_snps': [], } samples.append(curr_sample)", "!= y_sample_names: raise Exception('sample IDs do not match in files') def make_snp_stream(): while", "in make_snp_stream(): snp_chr_index = snp_chr_indexes.get(snp_id) if snp_chr_index is not None: snp_chr = snp_chr_index['chromosome']", "= xs[i] curr_y = ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index] =", "in files') genos = get_data(next_geno_row) xs = [float(x) for x in get_data(next_x_row)] ys", "y in get_data(next_y_row)] yield snp_id, genos, xs, ys samples = [] for sample_name", "['-'] * curr_snp_count, } curr_sample = { 'sample_id': mds.gen_unique_id(db), 'other_ids': [sample_name], 'platform_id': platform,", "for x in slice] with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \\ open(x_matrix_csv, 'r',", "Exception('sample IDs do not match in files') def make_snp_stream(): while True: next_geno_row =", "we've already imported all of the samples return x_sample_names = get_sample_names(next(x_matrix_table)) y_sample_names =", "SAMPLE_BATCH_SIZE] return [x.strip() for x in slice] def get_data(data_row): # the '+ 1'", "dict() for chr in platform_chrs: curr_snp_count = snp_count_per_chr[chr] chr_dict[chr] = { 'xs': [float('nan')]", "should be associated with all imported samples') parser.add_argument( 'geno_matrix_csv', help='comma-separated genotype values matrix')", "+ SAMPLE_BATCH_SIZE + 1] return [x.strip() for x in slice] with open(geno_matrix_csv, 'r',", "'y_matrix_csv', help='comma-separated Y intensity values matrix') args = parser.parse_args() import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv,", "intensity values matrix') args = parser.parse_args() import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv, [args.tag, args.platform],", 
"SAMPLE_BATCH_SIZE = 20 def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db): platform_chrs, snp_count_per_chr, snp_chr_indexes", "1] return [x.strip() for x in slice] with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle,", "xs[i] curr_y = ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index] = curr_y", "+= SAMPLE_BATCH_SIZE def main(): # parse command line arguments parser = argparse.ArgumentParser(description='import the", "snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db) curr_sample_start_index = 0 while True: def get_sample_names(header_row): slice", "= snp_chr_index['index'] for i, curr_sample in enumerate(samples): curr_geno = genos[i].upper() if curr_geno ==", "to accommodate the SNP ID column slice = data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE", "in enumerate(samples): curr_geno = genos[i].upper() if curr_geno == 'N': curr_geno = '-' curr_x", "get_data(data_row): # the '+ 1' is because we need to shift right to", "or snp_id != next_y_row[0].strip(): raise Exception('snp IDs do not match in files') genos", "line arguments parser = argparse.ArgumentParser(description='import the final report with probe intensities') parser.add_argument( 'platform',", "True: def get_sample_names(header_row): slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE] return [x.strip() for x in", "Exception('snp IDs do not match in files') genos = get_data(next_geno_row) xs = [float(x)", "[sample_name], 'platform_id': platform, 'chromosome_data': chr_dict, 'tags': sample_tags, 'unannotated_snps': [], } samples.append(curr_sample) for snp_id,", "slice = data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1] return [x.strip() for x", "values matrix') parser.add_argument( 'y_matrix_csv', help='comma-separated Y intensity 
values matrix') args = parser.parse_args() import_samples(", "ID column slice = data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1] return [x.strip()", "1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1] return [x.strip() for x in slice] with open(geno_matrix_csv,", "= [float(x) for x in get_data(next_x_row)] ys = [float(y) for y in get_data(next_y_row)]", "sample_tags, db): platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db) curr_sample_start_index = 0 while True:", "import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv, [args.tag, args.platform], mds.init_db()) if __name__ == '__main__': main()", "make_snp_stream(): while True: next_geno_row = next(geno_matrix_table) next_x_row = next(x_matrix_table) next_y_row = next(y_matrix_table) snp_id", "sample_name in sample_names: chr_dict = dict() for chr in platform_chrs: curr_snp_count = snp_count_per_chr[chr]", "not match in files') genos = get_data(next_geno_row) xs = [float(x) for x in", "snp_chr_indexes = mds.within_chr_snp_indices(platform, db) curr_sample_start_index = 0 while True: def get_sample_names(header_row): slice =", "curr_x curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index] = curr_geno for curr_sample in samples: mds.post_proc_sample(curr_sample) db.samples.insert_one(curr_sample)", "we are importing. 
eg: MegaMUGA') parser.add_argument( 'tag', help='a tag name that should be", "for snp_id, genos, xs, ys in make_snp_stream(): snp_chr_index = snp_chr_indexes.get(snp_id) if snp_chr_index is", "curr_geno = '-' curr_x = xs[i] curr_y = ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index]", "is not None: snp_chr = snp_chr_index['chromosome'] snp_index = snp_chr_index['index'] for i, curr_sample in", "print('inserted samples:', ', '.join(sample_names)) curr_sample_start_index += SAMPLE_BATCH_SIZE def main(): # parse command line", "curr_y = ys[i] curr_sample_chr = curr_sample['chromosome_data'][snp_chr] curr_sample_chr['xs'][snp_index] = curr_x curr_sample_chr['ys'][snp_index] = curr_y curr_sample_chr['snps'][snp_index]", "help='comma-separated Y intensity values matrix') args = parser.parse_args() import_samples( args.platform, args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv,", "[x.strip() for x in slice] with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \\ open(x_matrix_csv,", "genos[i].upper() if curr_geno == 'N': curr_geno = '-' curr_x = xs[i] curr_y =" ]
[ "input file path jsonPath = basepath + 'Hangouts.json' # OUTPUT: These are the", "# Author: <NAME> import json # JSON to handle Google's format import re", "This is the path to a temporary intermediate file tempPath = basepath +", "n in p['conversation_state']: for e in n['conversation_state']['event']: if 'chat_message' in e: x =", "intermediate file has been written # Now, run a wordcount # Read in", "= basepath + 'Hangouts.json' # OUTPUT: These are the output file paths. dict", "through Google's weird JSON format and picks out the chat text for n", "an intermediate file outFile.write(xtext) c += 1 print(u'Total number of chats: {0:d}'.format(c)) jsonFile.close()", "c += 1 print(u'Total number of chats: {0:d}'.format(c)) jsonFile.close() outFile.close() # The intermediate", "the number of chat messages # This loops through Google's weird JSON format", "wordcount for l in range(len(s)): line = s[l].lower().strip() # strip unnecessary white space", "in the JSON file jsonFile = open(jsonPath, 'r', encoding='utf8') outFile = open(tempPath,'w', encoding='utf8')", "in e: x = e['chat_message']['message_content'] if 'segment' in x: xtype = x['segment'][0]['type'] xtext", "is the path to a temporary intermediate file tempPath = basepath + 'hangouttemp.txt'", "e in n['conversation_state']['event']: if 'chat_message' in e: x = e['chat_message']['message_content'] if 'segment' in", "JSON to handle Google's format import re # regular expressions # CHANGE THIS.", "outFile.close() # Sort the wordcount in descending order of frequency and write to", "= basepath + 'hangoutfreq.txt' # This is the path to a temporary intermediate", "in descending order of frequency and write to file outFile = open(mainFreqPath, 'w',", "xtype = x['segment'][0]['type'] xtext = x['segment'][0]['text'] + u\" \" if xtype == u'TEXT':", "This loops through Google's weird JSON format and picks out the chat text", "Hangouts JSON file and produces a wordcount # Author: <NAME> import json #", "expressions # 
CHANGE THIS. For linux/mac, use '/home/user/restofpath/' basepath = 'C:\\\\Users\\\\Pinaky\\\\Desktop\\\\cesmd\\\\gmail_hangout\\\\' # INPUT:", "file paths. dict = sorted alphabetical; freq = sorted by frequency mainDictPath =", "all the data p = json.load(jsonFile) c = 0 # Count the number", "= json.load(jsonFile) c = 0 # Count the number of chat messages #", "the input file path jsonPath = basepath + 'Hangouts.json' # OUTPUT: These are", "Write out the chat text to an intermediate file outFile.write(xtext) c += 1", "and produces a wordcount # Author: <NAME> import json # JSON to handle", "JSON file and produces a wordcount # Author: <NAME> import json # JSON", "+ 'hangouttemp.txt' # Read in the JSON file jsonFile = open(jsonPath, 'r', encoding='utf8')", "to handle Google's format import re # regular expressions # CHANGE THIS. For", "use '/home/user/restofpath/' basepath = 'C:\\\\Users\\\\Pinaky\\\\Desktop\\\\cesmd\\\\gmail_hangout\\\\' # INPUT: This is the input file path", "sorted alphabetical; freq = sorted by frequency mainDictPath = basepath + 'hangoutdict.txt' mainFreqPath", "like a dictionary and write to file outFile = open(mainDictPath, 'w', encoding='utf8') for", "space line = re.sub(u'[^A-Za-z]+', u' ', line) # keep only alphabets and remove", "open(jsonPath, 'r', encoding='utf8') outFile = open(tempPath,'w', encoding='utf8') # 'p' is the variable that", "the intermediate file inFile = open(tempPath,'r', encoding='utf8') s = inFile.readlines() inFile.close() wordcount={} #", "outFile.write(str(v)) outFile.write(u'\\n') outFile.close() # Sort the wordcount in descending order of frequency and", "a wordcount # Read in the intermediate file inFile = open(tempPath,'r', encoding='utf8') s", "= x['segment'][0]['type'] xtext = x['segment'][0]['text'] + u\" \" if xtype == u'TEXT': #", "# Count the number of chat messages # This loops through Google's weird", "loops through Google's weird JSON format and picks out the chat text for", "intermediate file 
outFile.write(xtext) c += 1 print(u'Total number of chats: {0:d}'.format(c)) jsonFile.close() outFile.close()", "if xtype == u'TEXT': # Write out the chat text to an intermediate", "p['conversation_state']: for e in n['conversation_state']['event']: if 'chat_message' in e: x = e['chat_message']['message_content'] if", "wordcount in descending order of frequency and write to file outFile = open(mainFreqPath,", "if word not in wordcount: wordcount[word] = 1 else: wordcount[word] += 1 #", "open(mainDictPath, 'w', encoding='utf8') for k,v in sorted(wordcount.items()): outFile.write(str(k)) outFile.write(u' ') outFile.write(str(v)) outFile.write(u'\\n') outFile.close()", "This is the input file path jsonPath = basepath + 'Hangouts.json' # OUTPUT:", "'segment' in x: xtype = x['segment'][0]['type'] xtext = x['segment'][0]['text'] + u\" \" if", "re.sub(u'[^A-Za-z]+', u' ', line) # keep only alphabets and remove the rest for", "outFile.write(str(k)) outFile.write(u' ') outFile.write(str(v)) outFile.write(u'\\n') outFile.close() # Sort the wordcount in descending order", "are the output file paths. dict = sorted alphabetical; freq = sorted by", "by frequency mainDictPath = basepath + 'hangoutdict.txt' mainFreqPath = basepath + 'hangoutfreq.txt' #", "+ 'hangoutdict.txt' mainFreqPath = basepath + 'hangoutfreq.txt' # This is the path to", "= sorted by frequency mainDictPath = basepath + 'hangoutdict.txt' mainFreqPath = basepath +", "# JSON to handle Google's format import re # regular expressions # CHANGE", "re # regular expressions # CHANGE THIS. 
For linux/mac, use '/home/user/restofpath/' basepath =", "in a Google Hangouts JSON file and produces a wordcount # Author: <NAME>", "for n in p['conversation_state']: for e in n['conversation_state']['event']: if 'chat_message' in e: x", "open(mainFreqPath, 'w', encoding='utf8') for k, v in sorted(wordcount.items(), key=lambda w: w[1], reverse=True): outFile.write(str(k))", "= e['chat_message']['message_content'] if 'segment' in x: xtype = x['segment'][0]['type'] xtext = x['segment'][0]['text'] +", "Now, run a wordcount # Read in the intermediate file inFile = open(tempPath,'r',", "encoding='utf8') for k,v in sorted(wordcount.items()): outFile.write(str(k)) outFile.write(u' ') outFile.write(str(v)) outFile.write(u'\\n') outFile.close() # Sort", "the JSON file jsonFile = open(jsonPath, 'r', encoding='utf8') outFile = open(tempPath,'w', encoding='utf8') #", "descending order of frequency and write to file outFile = open(mainFreqPath, 'w', encoding='utf8')", "c = 0 # Count the number of chat messages # This loops", "sorted(wordcount.items()): outFile.write(str(k)) outFile.write(u' ') outFile.write(str(v)) outFile.write(u'\\n') outFile.close() # Sort the wordcount in descending", "weird JSON format and picks out the chat text for n in p['conversation_state']:", "the rest for word in line.split(): if word not in wordcount: wordcount[word] =", "variable that contains all the data p = json.load(jsonFile) c = 0 #", "a temporary intermediate file tempPath = basepath + 'hangouttemp.txt' # Read in the", "chat text to an intermediate file outFile.write(xtext) c += 1 print(u'Total number of", "'w', encoding='utf8') for k,v in sorted(wordcount.items()): outFile.write(str(k)) outFile.write(u' ') outFile.write(str(v)) outFile.write(u'\\n') outFile.close() #", "linux/mac, use '/home/user/restofpath/' basepath = 'C:\\\\Users\\\\Pinaky\\\\Desktop\\\\cesmd\\\\gmail_hangout\\\\' # INPUT: This is the input file", "the variable that contains all the data p = json.load(jsonFile) c = 0", 
"{0:d}'.format(c)) jsonFile.close() outFile.close() # The intermediate file has been written # Now, run", "# Now, run a wordcount # Read in the intermediate file inFile =", "= open(tempPath,'r', encoding='utf8') s = inFile.readlines() inFile.close() wordcount={} # The dictionary for wordcount", "dict = sorted alphabetical; freq = sorted by frequency mainDictPath = basepath +", "e['chat_message']['message_content'] if 'segment' in x: xtype = x['segment'][0]['type'] xtext = x['segment'][0]['text'] + u\"", "inFile.readlines() inFile.close() wordcount={} # The dictionary for wordcount for l in range(len(s)): line", "# strip unnecessary white space line = re.sub(u'[^A-Za-z]+', u' ', line) # keep", "file outFile.write(xtext) c += 1 print(u'Total number of chats: {0:d}'.format(c)) jsonFile.close() outFile.close() #", "a dictionary and write to file outFile = open(mainDictPath, 'w', encoding='utf8') for k,v", "0 # Count the number of chat messages # This loops through Google's", "program reads in a Google Hangouts JSON file and produces a wordcount #", "file path jsonPath = basepath + 'Hangouts.json' # OUTPUT: These are the output", "regular expressions # CHANGE THIS. 
For linux/mac, use '/home/user/restofpath/' basepath = 'C:\\\\Users\\\\Pinaky\\\\Desktop\\\\cesmd\\\\gmail_hangout\\\\' #", "outFile = open(tempPath,'w', encoding='utf8') # 'p' is the variable that contains all the", "== u'TEXT': # Write out the chat text to an intermediate file outFile.write(xtext)", "order of frequency and write to file outFile = open(mainFreqPath, 'w', encoding='utf8') for", "encoding='utf8') for k, v in sorted(wordcount.items(), key=lambda w: w[1], reverse=True): outFile.write(str(k)) outFile.write(u' ')", "file jsonFile = open(jsonPath, 'r', encoding='utf8') outFile = open(tempPath,'w', encoding='utf8') # 'p' is", "number of chat messages # This loops through Google's weird JSON format and", "# INPUT: This is the input file path jsonPath = basepath + 'Hangouts.json'", "picks out the chat text for n in p['conversation_state']: for e in n['conversation_state']['event']:", "json.load(jsonFile) c = 0 # Count the number of chat messages # This", "the data p = json.load(jsonFile) c = 0 # Count the number of", "white space line = re.sub(u'[^A-Za-z]+', u' ', line) # keep only alphabets and", "produces a wordcount # Author: <NAME> import json # JSON to handle Google's", "for k,v in sorted(wordcount.items()): outFile.write(str(k)) outFile.write(u' ') outFile.write(str(v)) outFile.write(u'\\n') outFile.close() # Sort the", "written # Now, run a wordcount # Read in the intermediate file inFile", "intermediate file inFile = open(tempPath,'r', encoding='utf8') s = inFile.readlines() inFile.close() wordcount={} # The", "text for n in p['conversation_state']: for e in n['conversation_state']['event']: if 'chat_message' in e:", "dictionary for wordcount for l in range(len(s)): line = s[l].lower().strip() # strip unnecessary", "l in range(len(s)): line = s[l].lower().strip() # strip unnecessary white space line =", "Google's weird JSON format and picks out the chat text for n in", "Sort the wordcount like a dictionary and write to file outFile = 
open(mainDictPath,", "# regular expressions # CHANGE THIS. For linux/mac, use '/home/user/restofpath/' basepath = 'C:\\\\Users\\\\Pinaky\\\\Desktop\\\\cesmd\\\\gmail_hangout\\\\'", "the chat text for n in p['conversation_state']: for e in n['conversation_state']['event']: if 'chat_message'", "sorted by frequency mainDictPath = basepath + 'hangoutdict.txt' mainFreqPath = basepath + 'hangoutfreq.txt'", "= basepath + 'hangoutdict.txt' mainFreqPath = basepath + 'hangoutfreq.txt' # This is the", "= open(mainDictPath, 'w', encoding='utf8') for k,v in sorted(wordcount.items()): outFile.write(str(k)) outFile.write(u' ') outFile.write(str(v)) outFile.write(u'\\n')", "line.split(): if word not in wordcount: wordcount[word] = 1 else: wordcount[word] += 1", "'hangoutfreq.txt' # This is the path to a temporary intermediate file tempPath =", "to a temporary intermediate file tempPath = basepath + 'hangouttemp.txt' # Read in", "k,v in sorted(wordcount.items()): outFile.write(str(k)) outFile.write(u' ') outFile.write(str(v)) outFile.write(u'\\n') outFile.close() # Sort the wordcount", "u' ', line) # keep only alphabets and remove the rest for word", "the chat text to an intermediate file outFile.write(xtext) c += 1 print(u'Total number", "jsonPath = basepath + 'Hangouts.json' # OUTPUT: These are the output file paths.", "# 'p' is the variable that contains all the data p = json.load(jsonFile)", "Google's format import re # regular expressions # CHANGE THIS. 
For linux/mac, use", "json # JSON to handle Google's format import re # regular expressions #", "in line.split(): if word not in wordcount: wordcount[word] = 1 else: wordcount[word] +=", "'p' is the variable that contains all the data p = json.load(jsonFile) c", "= open(mainFreqPath, 'w', encoding='utf8') for k, v in sorted(wordcount.items(), key=lambda w: w[1], reverse=True):", "1 else: wordcount[word] += 1 # Sort the wordcount like a dictionary and", "# This program reads in a Google Hangouts JSON file and produces a", "'C:\\\\Users\\\\Pinaky\\\\Desktop\\\\cesmd\\\\gmail_hangout\\\\' # INPUT: This is the input file path jsonPath = basepath +", "file has been written # Now, run a wordcount # Read in the", "the wordcount like a dictionary and write to file outFile = open(mainDictPath, 'w',", "basepath + 'hangouttemp.txt' # Read in the JSON file jsonFile = open(jsonPath, 'r',", "# OUTPUT: These are the output file paths. dict = sorted alphabetical; freq", "in p['conversation_state']: for e in n['conversation_state']['event']: if 'chat_message' in e: x = e['chat_message']['message_content']", "# Read in the JSON file jsonFile = open(jsonPath, 'r', encoding='utf8') outFile =", "else: wordcount[word] += 1 # Sort the wordcount like a dictionary and write", "= open(tempPath,'w', encoding='utf8') # 'p' is the variable that contains all the data", "e: x = e['chat_message']['message_content'] if 'segment' in x: xtype = x['segment'][0]['type'] xtext =", "JSON file jsonFile = open(jsonPath, 'r', encoding='utf8') outFile = open(tempPath,'w', encoding='utf8') # 'p'", "x: xtype = x['segment'][0]['type'] xtext = x['segment'][0]['text'] + u\" \" if xtype ==", "Read in the intermediate file inFile = open(tempPath,'r', encoding='utf8') s = inFile.readlines() inFile.close()", "CHANGE THIS. 
# This program reads in a Google Hangouts JSON file and produces a wordcount
# Author: <NAME>
import json  # JSON to handle Google's format
import re  # regular expressions

# CHANGE THIS. For linux/mac, use '/home/user/restofpath/'
basepath = 'C:\\Users\\Pinaky\\Desktop\\cesmd\\gmail_hangout\\'
# INPUT: This is the input file path
jsonPath = basepath + 'Hangouts.json'
# OUTPUT: These are the output file paths.
# dict = sorted alphabetical; freq = sorted by frequency
mainDictPath = basepath + 'hangoutdict.txt'
mainFreqPath = basepath + 'hangoutfreq.txt'
# This is the path to a temporary intermediate file
tempPath = basepath + 'hangouttemp.txt'

# Read in the JSON file and extract the chat text.
# 'with' replaces the original open()/close() pairs so the handles are
# closed even if json.load or the extraction below raises.
c = 0  # count of chat messages
with open(jsonPath, 'r', encoding='utf8') as jsonFile, \
        open(tempPath, 'w', encoding='utf8') as outFile:
    # 'p' is the variable that contains all the data
    p = json.load(jsonFile)
    # This loops through Google's weird JSON format and picks out the chat text
    for n in p['conversation_state']:
        for e in n['conversation_state']['event']:
            if 'chat_message' in e:
                x = e['chat_message']['message_content']
                if 'segment' in x:
                    xtype = x['segment'][0]['type']
                    xtext = x['segment'][0]['text'] + u" "
                    if xtype == u'TEXT':
                        # Write out the chat text to an intermediate file
                        outFile.write(xtext)
                        c += 1
print(u'Total number of chats: {0:d}'.format(c))

# The intermediate file has been written
# Now, run a wordcount over it
wordcount = {}  # The dictionary for wordcount
with open(tempPath, 'r', encoding='utf8') as inFile:
    for raw in inFile:
        line = raw.lower().strip()  # strip unnecessary white space
        # keep only alphabets and remove the rest
        line = re.sub(u'[^A-Za-z]+', u' ', line)
        for word in line.split():
            wordcount[word] = wordcount.get(word, 0) + 1

# Sort the wordcount like a dictionary (alphabetically) and write to file
with open(mainDictPath, 'w', encoding='utf8') as outFile:
    for k, v in sorted(wordcount.items()):
        outFile.write(str(k))
        outFile.write(u' ')
        outFile.write(str(v))
        outFile.write(u'\n')

# Sort the wordcount in descending order of frequency and write to file
with open(mainFreqPath, 'w', encoding='utf8') as outFile:
    for k, v in sorted(wordcount.items(), key=lambda w: w[1], reverse=True):
        outFile.write(str(k))
        outFile.write(u' ')
        outFile.write(str(v))
        outFile.write(u'\n')
def count_all_info(args):
    """Aggregate per-user danmaku counts over a whole directory tree.

    Expects args.countall[0] to name a directory (relative to the CWD)
    whose immediate sub-directories contain danmaku files; every file in
    every sub-directory is counted.  Prints the total user count.
    """
    root_dir = os.path.join(os.getcwd(), args.countall[0])
    # Collect <root>/<subdir>/<file> paths first so tqdm can show progress.
    danmu_file_list = []
    for sub_name in os.listdir(root_dir):
        sub_path = os.path.join(root_dir, sub_name)
        for file_name in os.listdir(sub_path):
            danmu_file_list.append(os.path.join(sub_path, file_name))
    users_count_info = Counter({})
    for dm_file in tqdm(danmu_file_list):
        users_count_info += Counter(get_users_count_info(dm_file))
    # user count
    print('Total user count: {count}'.format(count = len(users_count_info)))
def get_danmu_user_name(line):
    """Extract the sender name from one raw danmaku XML line.

    Returns the name captured between the 1–2 digit field and the next
    comma-free field ending in '">', or '' when the line does not match.
    """
    matches = re.findall(r',\d{1,2},([^,]+),[^,]*">', line)
    return matches[0] if matches else ''
def get_users_count_info(danmu_file):
    """Count danmaku lines per user in one file.

    Returns a dict mapping user name -> number of danmaku lines posted.
    Fixes an off-by-one in the original, which initialised a user's
    first danmaku to 0 instead of 1 and so undercounted every user.
    Unreadable bytes are ignored (errors='ignore').
    """
    users_count_info = {}
    with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file:
        for line in dm_file:
            # only lines that look like danmaku entries are counted
            if re.findall(r',\d{1,2},([^,]+?),[^,]*">', line):
                user_name = get_danmu_user_name(line)
                users_count_info[user_name] = users_count_info.get(user_name, 0) + 1
    return users_count_info
def users_info(args):
    """Report per-user activity for a single danmaku file (CLI -f mode).

    Prints the total user count, the top-50 users by danmaku count and
    the top-50 users by "stay" (number of active time buckets).
    args.delta is the bucket size in minutes; defaults to 30 when absent.
    """
    delta = args.delta if args.delta else 30
    print(args.file)
    info = get_users_info(args.file, delta)
    sorted_stay_info = sort_by_stay(info)
    sorted_count_info = sort_by_count(info)
    # user count
    print('Total user count: {count}'.format(count = len(info)))
    # user danmu count top 50 — slice to 50 so files with fewer than 50
    # users no longer raise IndexError as the original range(50) loop did
    print('user danmu count top 50:')
    for i, (key, v) in enumerate(sorted_count_info[:50]):
        print('{idx}\t{key}: {value}'.format(idx = i + 1, key = key, value = v))
    # user stay top 50; guard the empty-file case before indexing [0]
    delta_max = len(list(info.items())[0][1]) if info else 0
    print('user stay top 50(max {max}):'.format(max = delta_max))
    for i, (key, v) in enumerate(sorted_stay_info[:50]):
        # i + 1: 1-based ranks, consistent with the count list above
        # (the original printed 0-based ranks here only)
        print('{idx}\t{key}: {value}'.format(idx = i + 1, key = key, value = v))
def count_info(args):
    """Aggregate per-user danmaku counts over the files given via -c.

    Skips paths that do not exist, prints the total user count and the
    top-50 users, and writes the full ranking to counts.txt.
    """
    users_count_info = Counter({})
    for f in args.count:
        if os.path.exists(f):
            users_count_info += Counter(get_users_count_info(f))
    # user count
    print('Total user count: {count}'.format(count = len(users_count_info)))
    # sort count
    sorted_count_info = sorted(users_count_info.items(), key = lambda x: x[1], reverse=True)
    # slice to 50 so fewer than 50 users no longer raises IndexError
    for i, (key, v) in enumerate(sorted_count_info[:50]):
        print('{idx}\t{key}: {value}'.format(idx = i + 1, key = key, value = v))
    # full ranking goes to disk; enumerate replaces the manual idx counter
    with open('counts.txt', 'w', encoding='utf-8') as count_file:
        for idx, (key, v) in enumerate(sorted_count_info):
            count_file.write('{idx}\t{key}: {value}\n'.format(
                idx = idx + 1, key = key, value = v))
def print_sorted_info(users_info):
    """Placeholder for pretty-printing sorted user info; currently a no-op."""
    return None
def get_users_info(danmu_file, danmu_delta_minute):
    """Build a per-user activity histogram for one danmaku file.

    The timeline is split into buckets of danmu_delta_minute minutes and
    the result maps user name -> list of danmaku counts, one per bucket.
    Assumes the file's danmaku lines are ordered by timestamp (the last
    line is used as the total duration) — TODO confirm against the data.
    """
    danmu_list = []
    with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file:
        for line in dm_file:
            if re.findall(r',\d{1,2},([^,]+?),[^,]*">', line):
                danmu_list.append(line)
    if not danmu_list:
        # empty/unparseable file: the original crashed on danmu_list[-1]
        return {}
    bucket_seconds = danmu_delta_minute * 60
    danmu_total_time = get_danmu_time(danmu_list[-1])
    delta_idx_total = int(danmu_total_time / bucket_seconds) + 1
    # init all user danmu dict
    users_info = {}
    for danmu in danmu_list:
        user_name = get_danmu_user_name(danmu)
        if user_name not in users_info:
            users_info[user_name] = [0] * delta_idx_total
    delta_idx = 0
    danmu_delta_max = (delta_idx + 1) * bucket_seconds
    for danmu in danmu_list:
        danmu_time = get_danmu_time(danmu)
        # 'while', not 'if': a quiet gap spanning several buckets must
        # advance past every empty bucket, otherwise later danmaku are
        # attributed to a bucket one or more slots too early
        while danmu_time >= danmu_delta_max:
            delta_idx += 1
            danmu_delta_max = (delta_idx + 1) * bucket_seconds
        users_info[get_danmu_user_name(danmu)][delta_idx] += 1
    return users_info
add_count_info # user count print('Total user", "= delta_max)) for i in range(50): key = sorted_stay_info[i][0] v = sorted_stay_info[i][1] print('{idx}\\t{key}:", "errors='ignore') as dm_file: for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) for danmu", "args.countall[0]) danmu_dirs = os.listdir(danmu_dir) danmu_file_list = [] for dm_dir in danmu_dirs: danmu_file_dir =", "= int(danmu_total_time / (danmu_delta_minute * 60)) + 1 for danmu in danmu_list: user_name", "= info[0] v = info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx = idx + 1, key", "if not dm_count == 0: stay_count += 1 users_stay_info[user] = stay_count sorted_list =", "danmu_files = os.listdir(danmu_file_dir) for danmu_file in danmu_files: dm_file = os.path.join(danmu_file_dir, danmu_file) danmu_file_list.append(dm_file) for", "with open('counts.txt', 'w', encoding='utf-8') as count_file: idx = 0 for info in sorted_count_info:", "args.delta print(args.file) users_info = get_users_info(args.file, delta) sorted_stay_info = sort_by_stay(users_info) sorted_count_info = sort_by_count(users_info) #", "sort_by_count(users_info) # user count print('Total user count: {count}'.format(count = len(users_info))) # user danmu", "0 for dm_count in users_info[user]: if not dm_count == 0: stay_count += 1", "danmu_delta_minute * 60 # init all user danmu dict danmu_total_time = get_danmu_time(danmu_list[-1]) delta_idx_total", "danmu_delta_max = (delta_idx + 1) * danmu_delta_minute * 60 # init all user", "= v)) idx += 1 def count_all_info(args): users_count_info = Counter({}) danmu_dir = os.path.join(os.getcwd(),", "= 0 for info in sorted_count_info: key = info[0] v = info[1] count_file.write('{idx}\\t{key}:", "Counter({}) for f in args.count: if os.path.exists(f): add_count_info = Counter(get_users_count_info(f)) users_count_info += add_count_info", "x[1], reverse=True) for i in range(50): key = sorted_count_info[i][0] v = sorted_count_info[i][1] 
if __name__ == '__main__':
    # CLI entry point: -c counts the named files, -f analyses a single
    # file (with optional -d bucket size in minutes), -ca walks a
    # two-level directory tree of danmaku files.
    parser = argparse.ArgumentParser(
        description='Get User info from danmaku file.')
    parser.add_argument('-c', '--count', type=str, nargs='+', help='count help')
    parser.add_argument('-ca', '--countall', type=str, nargs='+', help='count help')
    parser.add_argument('-f', '--file', type=str, help='danmaku help')
    parser.add_argument('-d', '--delta', type=int, help='delta minute help')
    args = parser.parse_args()
    if args.count:
        count_info(args)
    elif args.file:
        users_info(args)
    elif args.countall:
        count_all_info(args)
def sort_by_count(users_info):
    """Rank users by total danmaku count across all buckets, descending.

    users_info maps user name -> list of per-bucket counts; returns a
    list of (user, total) pairs sorted by total, highest first.
    """
    totals = {user: sum(buckets) for user, buckets in users_info.items()}
    return sorted(totals.items(), key=lambda item: item[1], reverse=True)
sort_by_stay(users_info): users_stay_info = {}", "'' def get_danmu_time(line): res = re.findall(r'p=\"(\\d+)\\.', line) if res: return int(res[0]) return -1", "if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not", "= sort_by_stay(users_info) sorted_count_info = sort_by_count(users_info) # user count print('Total user count: {count}'.format(count =", "1 return users_info def sort_by_stay(users_info): users_stay_info = {} for user in users_info: stay_count", "return int(res[0]) return -1 def get_users_info(danmu_file, danmu_delta_minute): users_info = {} danmu_list = []", "= len(list(users_info.items())[0][1]) print('user stay top 50(max {max}):'.format(max = delta_max)) for i in range(50):", "danmu_file in danmu_files: dm_file = os.path.join(danmu_file_dir, danmu_file) danmu_file_list.append(dm_file) for dm_file in tqdm(danmu_file_list): add_count_info", "len(users_count_info))) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Get User info from danmaku", "def sort_by_count(users_info): users_count_info = {} for user in users_info: stay_count = 0 for", "value = v)) idx += 1 def count_all_info(args): users_count_info = Counter({}) danmu_dir =", "+ 1, key = key, value = v)) idx += 1 def count_all_info(args):", "== 0: stay_count += 1 users_stay_info[user] = stay_count sorted_list = sorted(users_stay_info.items(), key =", "type = str, nargs = '+', help='count help') parser.add_argument('-f', '--file', type = str,", "stay_count += dm_count users_count_info[user] = stay_count sorted_list = sorted(users_count_info.items(), key = lambda x:", "str, help='danmaku help') parser.add_argument('-d', '--delta', type = int, help='delta minute help') args =", "delta_max)) for i in range(50): key = sorted_stay_info[i][0] v = sorted_stay_info[i][1] print('{idx}\\t{key}: {value}'.format(idx", "res: return res[0] return '' def get_danmu_time(line): res = 
re.findall(r'p=\"(\\d+)\\.', line) if res:", "from danmaku file.') parser.add_argument('-c', '--count', type = str, nargs = '+', help='count help')", "help') parser.add_argument('-ca', '--countall', type = str, nargs = '+', help='count help') parser.add_argument('-f', '--file',", "info in sorted_count_info: key = info[0] v = info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx =", "# user danmu count top 50 print('user danmu count top 50:') for i", "encoding='utf-8', errors='ignore') as dm_file: for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) for", "(user_name in users_info): users_info[user_name] = [0] * delta_idx_total for danmu in danmu_list: danmu_time", "tqdm(danmu_file_list): add_count_info = Counter(get_users_count_info(dm_file)) users_count_info += add_count_info # user count print('Total user count:", "stay_count sorted_list = sorted(users_stay_info.items(), key = lambda x: x[1], reverse=True) return sorted_list def", "if not (user_name in users_count_info): users_count_info[user_name] = 0 else: users_count_info[user_name] += 1 return", "= re.findall(r'p=\"(\\d+)\\.', line) if res: return int(res[0]) return -1 def get_users_info(danmu_file, danmu_delta_minute): users_info", "help='count help') parser.add_argument('-ca', '--countall', type = str, nargs = '+', help='count help') parser.add_argument('-f',", "not (user_name in users_count_info): users_count_info[user_name] = 0 else: users_count_info[user_name] += 1 return users_count_info", "= len(users_info))) # user danmu count top 50 print('user danmu count top 50:')", "encoding='utf-8') as count_file: idx = 0 for info in sorted_count_info: key = info[0]", "= int, help='delta minute help') args = parser.parse_args() if args.count: count_info(args) elif args.file:", "* 60)) + 1 for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not", "line) if res: return int(res[0]) return -1 def get_users_info(danmu_file, danmu_delta_minute): 
users_info = {}", "danmu_list.append(line) delta_idx = 0 danmu_delta_max = (delta_idx + 1) * danmu_delta_minute * 60", "delta_idx_total = int(danmu_total_time / (danmu_delta_minute * 60)) + 1 for danmu in danmu_list:", "== '__main__': parser = argparse.ArgumentParser( description='Get User info from danmaku file.') parser.add_argument('-c', '--count',", "print('Total user count: {count}'.format(count = len(users_info))) # user danmu count top 50 print('user", "pass def users_info(args): if not args.delta: delta = 30 else: delta = args.delta", "re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) delta_idx = 0 danmu_delta_max = (delta_idx + 1) * danmu_delta_minute", "danmu_dir = os.path.join(os.getcwd(), args.countall[0]) danmu_dirs = os.listdir(danmu_dir) danmu_file_list = [] for dm_dir in", "dm_count users_count_info[user] = stay_count sorted_list = sorted(users_count_info.items(), key = lambda x: x[1], reverse=True)", "count print('Total user count: {count}'.format(count = len(users_info))) # user danmu count top 50", "tqdm def get_danmu_user_name(line): res = re.findall(r',\\d{1,2},([^,]+),[^,]*\">', line) if res: return res[0] return ''", "f in args.count: if os.path.exists(f): add_count_info = Counter(get_users_count_info(f)) users_count_info += add_count_info # user", "key = lambda x: x[1], reverse=True) return sorted_list def print_sorted_info(users_info): pass def users_info(args):", "danmu_list.append(line) for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in users_count_info):", "get_danmu_time(danmu_list[-1]) delta_idx_total = int(danmu_total_time / (danmu_delta_minute * 60)) + 1 for danmu in", "[] for dm_dir in danmu_dirs: danmu_file_dir = os.path.join(danmu_dir, dm_dir) danmu_files = os.listdir(danmu_file_dir) for", "in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) delta_idx = 0 danmu_delta_max = (delta_idx +", "delta_idx += 1 danmu_delta_max = (delta_idx + 1) 
* danmu_delta_minute * 60 user_name", "Counter(get_users_count_info(f)) users_count_info += add_count_info # user count print('Total user count: {count}'.format(count = len(users_count_info)))", "user count print('Total user count: {count}'.format(count = len(users_count_info))) # sort count sorted_count_info =", "idx = 0 for info in sorted_count_info: key = info[0] v = info[1]", "reverse=True) return sorted_list def sort_by_count(users_info): users_count_info = {} for user in users_info: stay_count", "= lambda x: x[1], reverse=True) for i in range(50): key = sorted_count_info[i][0] v", "key = info[0] v = info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx = idx + 1,", "'--delta', type = int, help='delta minute help') args = parser.parse_args() if args.count: count_info(args)", "get_danmu_time(danmu) if danmu_time >= danmu_delta_max: delta_idx += 1 danmu_delta_max = (delta_idx + 1)", "top 50 print('user danmu count top 50:') for i in range(50): key =", ">= danmu_delta_max: delta_idx += 1 danmu_delta_max = (delta_idx + 1) * danmu_delta_minute *", "users_info = {} danmu_list = [] with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file:", "users_info[user_name][delta_idx] += 1 return users_info def sort_by_stay(users_info): users_stay_info = {} for user in", "{max}):'.format(max = delta_max)) for i in range(50): key = sorted_stay_info[i][0] v = sorted_stay_info[i][1]", "+= 1 return users_info def sort_by_stay(users_info): users_stay_info = {} for user in users_info:", "= {} for user in users_info: stay_count = 0 for dm_count in users_info[user]:", "if not args.delta: delta = 30 else: delta = args.delta print(args.file) users_info =", "60 user_name = get_danmu_user_name(danmu) users_info[user_name][delta_idx] += 1 return users_info def sort_by_stay(users_info): users_stay_info =", "os.path.join(os.getcwd(), args.countall[0]) danmu_dirs = os.listdir(danmu_dir) danmu_file_list = [] for dm_dir in danmu_dirs: danmu_file_dir", "danmu in 
danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in users_count_info): users_count_info[user_name] =", "sort_by_stay(users_info): users_stay_info = {} for user in users_info: stay_count = 0 for dm_count", "1, key = key, value = v)) with open('counts.txt', 'w', encoding='utf-8') as count_file:", "in users_info): users_info[user_name] = [0] * delta_idx_total for danmu in danmu_list: danmu_time =", "for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) for danmu in danmu_list: user_name", "for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) delta_idx = 0 danmu_delta_max =", "= sorted(users_count_info.items(), key = lambda x: x[1], reverse=True) return sorted_list def print_sorted_info(users_info): pass", "if res: return int(res[0]) return -1 def get_users_info(danmu_file, danmu_delta_minute): users_info = {} danmu_list", "= stay_count sorted_list = sorted(users_count_info.items(), key = lambda x: x[1], reverse=True) return sorted_list", "for info in sorted_count_info: key = info[0] v = info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx", "Counter(get_users_count_info(dm_file)) users_count_info += add_count_info # user count print('Total user count: {count}'.format(count = len(users_count_info)))", "if danmu_time >= danmu_delta_max: delta_idx += 1 danmu_delta_max = (delta_idx + 1) *", "60 # init all user danmu dict danmu_total_time = get_danmu_time(danmu_list[-1]) delta_idx_total = int(danmu_total_time", "= Counter(get_users_count_info(dm_file)) users_count_info += add_count_info # user count print('Total user count: {count}'.format(count =", "get_danmu_user_name(danmu) if not (user_name in users_count_info): users_count_info[user_name] = 0 else: users_count_info[user_name] += 1", "in sorted_count_info: key = info[0] v = info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx = idx", "= os.path.join(danmu_dir, dm_dir) danmu_files = 
os.listdir(danmu_file_dir) for danmu_file in danmu_files: dm_file = os.path.join(danmu_file_dir,", "= i, key = key, value = v)) def get_users_count_info(danmu_file): users_count_info = {}", "key = key, value = v)) # user stay top 50 delta_max =", "= sorted(users_stay_info.items(), key = lambda x: x[1], reverse=True) return sorted_list def sort_by_count(users_info): users_count_info", "import tqdm def get_danmu_user_name(line): res = re.findall(r',\\d{1,2},([^,]+),[^,]*\">', line) if res: return res[0] return", "users_count_info): users_count_info[user_name] = 0 else: users_count_info[user_name] += 1 return users_count_info def count_info(args): users_count_info", "for dm_dir in danmu_dirs: danmu_file_dir = os.path.join(danmu_dir, dm_dir) danmu_files = os.listdir(danmu_file_dir) for danmu_file", "file.') parser.add_argument('-c', '--count', type = str, nargs = '+', help='count help') parser.add_argument('-ca', '--countall',", "x[1], reverse=True) return sorted_list def print_sorted_info(users_info): pass def users_info(args): if not args.delta: delta", "danmu_time >= danmu_delta_max: delta_idx += 1 danmu_delta_max = (delta_idx + 1) * danmu_delta_minute", "= sorted(users_count_info.items(), key = lambda x: x[1], reverse=True) for i in range(50): key", "= idx + 1, key = key, value = v)) idx += 1", "60)) + 1 for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name", "= Counter({}) danmu_dir = os.path.join(os.getcwd(), args.countall[0]) danmu_dirs = os.listdir(danmu_dir) danmu_file_list = [] for", "danmu_file_dir = os.path.join(danmu_dir, dm_dir) danmu_files = os.listdir(danmu_file_dir) for danmu_file in danmu_files: dm_file =", "= key, value = v)) # user stay top 50 delta_max = len(list(users_info.items())[0][1])", "info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx = idx + 1, key = key, value =", "dm_file = os.path.join(danmu_file_dir, danmu_file) danmu_file_list.append(dm_file) for dm_file in tqdm(danmu_file_list): 
add_count_info = Counter(get_users_count_info(dm_file)) users_count_info", "# -*- coding: utf-8 -*- import re import os import argparse from collections", "count top 50 print('user danmu count top 50:') for i in range(50): key", "* 60 # init all user danmu dict danmu_total_time = get_danmu_time(danmu_list[-1]) delta_idx_total =", "if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) delta_idx = 0 danmu_delta_max = (delta_idx + 1) *", "get_danmu_time(line): res = re.findall(r'p=\"(\\d+)\\.', line) if res: return int(res[0]) return -1 def get_users_info(danmu_file,", "danmu_file_list = [] for dm_dir in danmu_dirs: danmu_file_dir = os.path.join(danmu_dir, dm_dir) danmu_files =", "+ 1 for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in", "in danmu_dirs: danmu_file_dir = os.path.join(danmu_dir, dm_dir) danmu_files = os.listdir(danmu_file_dir) for danmu_file in danmu_files:", "dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if", "re.findall(r'p=\"(\\d+)\\.', line) if res: return int(res[0]) return -1 def get_users_info(danmu_file, danmu_delta_minute): users_info =", "(delta_idx + 1) * danmu_delta_minute * 60 # init all user danmu dict", "sorted_count_info = sort_by_count(users_info) # user count print('Total user count: {count}'.format(count = len(users_info))) #", "key, value = v)) with open('counts.txt', 'w', encoding='utf-8') as count_file: idx = 0", "count_file.write('{idx}\\t{key}: {value}\\n'.format( idx = idx + 1, key = key, value = v))", "not dm_count == 0: stay_count += dm_count users_count_info[user] = stay_count sorted_list = sorted(users_count_info.items(),", "line) if res: return res[0] return '' def get_danmu_time(line): res = re.findall(r'p=\"(\\d+)\\.', line)", "os.path.join(danmu_dir, dm_dir) danmu_files = os.listdir(danmu_file_dir) for danmu_file in danmu_files: dm_file = 
os.path.join(danmu_file_dir, danmu_file)", "users_info = get_users_info(args.file, delta) sorted_stay_info = sort_by_stay(users_info) sorted_count_info = sort_by_count(users_info) # user count", "i + 1, key = key, value = v)) with open('counts.txt', 'w', encoding='utf-8')", "sorted_list def print_sorted_info(users_info): pass def users_info(args): if not args.delta: delta = 30 else:", "-1 def get_users_info(danmu_file, danmu_delta_minute): users_info = {} danmu_list = [] with open(danmu_file, 'r',", "in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) for danmu in danmu_list: user_name = get_danmu_user_name(danmu)", "stay_count sorted_list = sorted(users_count_info.items(), key = lambda x: x[1], reverse=True) return sorted_list def", "top 50:') for i in range(50): key = sorted_count_info[i][0] v = sorted_count_info[i][1] print('{idx}\\t{key}:", "= key, value = v)) with open('counts.txt', 'w', encoding='utf-8') as count_file: idx =", "'w', encoding='utf-8') as count_file: idx = 0 for info in sorted_count_info: key =", "danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in users_count_info): users_count_info[user_name] = 0 else:", "# user count print('Total user count: {count}'.format(count = len(users_count_info))) if __name__ == '__main__':", "if __name__ == '__main__': parser = argparse.ArgumentParser( description='Get User info from danmaku file.')", "int, help='delta minute help') args = parser.parse_args() if args.count: count_info(args) elif args.file: users_info(args)", "x[1], reverse=True) return sorted_list def sort_by_count(users_info): users_count_info = {} for user in users_info:", "+ 1, key = key, value = v)) with open('counts.txt', 'w', encoding='utf-8') as", "50 print('user danmu count top 50:') for i in range(50): key = sorted_count_info[i][0]", "v)) def get_users_count_info(danmu_file): users_count_info = {} danmu_list = [] with open(danmu_file, 'r', encoding='utf-8',", "# sort count 
sorted_count_info = sorted(users_count_info.items(), key = lambda x: x[1], reverse=True) for", "get_danmu_user_name(line): res = re.findall(r',\\d{1,2},([^,]+),[^,]*\">', line) if res: return res[0] return '' def get_danmu_time(line):", "not dm_count == 0: stay_count += 1 users_stay_info[user] = stay_count sorted_list = sorted(users_stay_info.items(),", "'r', encoding='utf-8', errors='ignore') as dm_file: for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line)", "args.delta: delta = 30 else: delta = args.delta print(args.file) users_info = get_users_info(args.file, delta)", "user count: {count}'.format(count = len(users_info))) # user danmu count top 50 print('user danmu", "len(list(users_info.items())[0][1]) print('user stay top 50(max {max}):'.format(max = delta_max)) for i in range(50): key", "info from danmaku file.') parser.add_argument('-c', '--count', type = str, nargs = '+', help='count", "print('Total user count: {count}'.format(count = len(users_count_info))) # sort count sorted_count_info = sorted(users_count_info.items(), key", "get_users_info(danmu_file, danmu_delta_minute): users_info = {} danmu_list = [] with open(danmu_file, 'r', encoding='utf-8', errors='ignore')", "= Counter(get_users_count_info(f)) users_count_info += add_count_info # user count print('Total user count: {count}'.format(count =", "return sorted_list def sort_by_count(users_info): users_count_info = {} for user in users_info: stay_count =", "int(res[0]) return -1 def get_users_info(danmu_file, danmu_delta_minute): users_info = {} danmu_list = [] with", "as count_file: idx = 0 for info in sorted_count_info: key = info[0] v", "key = key, value = v)) idx += 1 def count_all_info(args): users_count_info =", "= {} danmu_list = [] with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file: for", "danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in users_info): users_info[user_name] =", "all user danmu 
dict danmu_total_time = get_danmu_time(danmu_list[-1]) delta_idx_total = int(danmu_total_time / (danmu_delta_minute *", "info[0] v = info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx = idx + 1, key =", "sorted(users_count_info.items(), key = lambda x: x[1], reverse=True) return sorted_list def print_sorted_info(users_info): pass def", "50:') for i in range(50): key = sorted_count_info[i][0] v = sorted_count_info[i][1] print('{idx}\\t{key}: {value}'.format(idx", "user danmu dict danmu_total_time = get_danmu_time(danmu_list[-1]) delta_idx_total = int(danmu_total_time / (danmu_delta_minute * 60))", "count top 50:') for i in range(50): key = sorted_count_info[i][0] v = sorted_count_info[i][1]", "= key, value = v)) def get_users_count_info(danmu_file): users_count_info = {} danmu_list = []", "= 0 else: users_count_info[user_name] += 1 return users_count_info def count_info(args): users_count_info = Counter({})", "in tqdm(danmu_file_list): add_count_info = Counter(get_users_count_info(dm_file)) users_count_info += add_count_info # user count print('Total user", "{count}'.format(count = len(users_count_info))) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Get User info", "sort_by_stay(users_info) sorted_count_info = sort_by_count(users_info) # user count print('Total user count: {count}'.format(count = len(users_info)))", "from tqdm import tqdm def get_danmu_user_name(line): res = re.findall(r',\\d{1,2},([^,]+),[^,]*\">', line) if res: return", "not (user_name in users_info): users_info[user_name] = [0] * delta_idx_total for danmu in danmu_list:", "key = key, value = v)) def get_users_count_info(danmu_file): users_count_info = {} danmu_list =", "= (delta_idx + 1) * danmu_delta_minute * 60 # init all user danmu", "x: x[1], reverse=True) for i in range(50): key = sorted_count_info[i][0] v = sorted_count_info[i][1]", "v)) idx += 1 def count_all_info(args): users_count_info = Counter({}) danmu_dir = os.path.join(os.getcwd(), 
args.countall[0])", "0 for info in sorted_count_info: key = info[0] v = info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format(", "for dm_count in users_info[user]: if not dm_count == 0: stay_count += dm_count users_count_info[user]", "encoding='utf-8', errors='ignore') as dm_file: for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) delta_idx", "0: stay_count += dm_count users_count_info[user] = stay_count sorted_list = sorted(users_count_info.items(), key = lambda", "sorted_stay_info[i][0] v = sorted_stay_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i, key = key, value =", "= str, nargs = '+', help='count help') parser.add_argument('-ca', '--countall', type = str, nargs", "for dm_file in tqdm(danmu_file_list): add_count_info = Counter(get_users_count_info(dm_file)) users_count_info += add_count_info # user count", "sorted_count_info: key = info[0] v = info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx = idx +", "= str, help='danmaku help') parser.add_argument('-d', '--delta', type = int, help='delta minute help') args", "help') args = parser.parse_args() if args.count: count_info(args) elif args.file: users_info(args) elif args.countall: count_all_info(args)", "sorted_stay_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i, key = key, value = v)) def get_users_count_info(danmu_file):", "1, key = key, value = v)) # user stay top 50 delta_max", "= get_danmu_time(danmu_list[-1]) delta_idx_total = int(danmu_total_time / (danmu_delta_minute * 60)) + 1 for danmu", "= i + 1, key = key, value = v)) # user stay", "1 return users_count_info def count_info(args): users_count_info = Counter({}) for f in args.count: if", "in danmu_list: danmu_time = get_danmu_time(danmu) if danmu_time >= danmu_delta_max: delta_idx += 1 danmu_delta_max", "for i in range(50): key = sorted_count_info[i][0] v = sorted_count_info[i][1] print('{idx}\\t{key}: {value}'.format(idx =", "for dm_count in users_info[user]: if 
not dm_count == 0: stay_count += 1 users_stay_info[user]", "stay top 50 delta_max = len(list(users_info.items())[0][1]) print('user stay top 50(max {max}):'.format(max = delta_max))", "+= dm_count users_count_info[user] = stay_count sorted_list = sorted(users_count_info.items(), key = lambda x: x[1],", "= args.delta print(args.file) users_info = get_users_info(args.file, delta) sorted_stay_info = sort_by_stay(users_info) sorted_count_info = sort_by_count(users_info)", "idx += 1 def count_all_info(args): users_count_info = Counter({}) danmu_dir = os.path.join(os.getcwd(), args.countall[0]) danmu_dirs", "return users_count_info def count_info(args): users_count_info = Counter({}) for f in args.count: if os.path.exists(f):", "else: delta = args.delta print(args.file) users_info = get_users_info(args.file, delta) sorted_stay_info = sort_by_stay(users_info) sorted_count_info", "for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in users_count_info): users_count_info[user_name]", "+= 1 users_stay_info[user] = stay_count sorted_list = sorted(users_stay_info.items(), key = lambda x: x[1],", "def sort_by_stay(users_info): users_stay_info = {} for user in users_info: stay_count = 0 for", "{} danmu_list = [] with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file: for line", "in users_info: stay_count = 0 for dm_count in users_info[user]: if not dm_count ==", "key = sorted_count_info[i][0] v = sorted_count_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i + 1, key", "danmu_delta_minute * 60 user_name = get_danmu_user_name(danmu) users_info[user_name][delta_idx] += 1 return users_info def sort_by_stay(users_info):", "# user count print('Total user count: {count}'.format(count = len(users_info))) # user danmu count", "import Counter from tqdm import tqdm def get_danmu_user_name(line): res = re.findall(r',\\d{1,2},([^,]+),[^,]*\">', line) if", "print('Total user count: {count}'.format(count = len(users_count_info))) if 
__name__ == '__main__': parser = argparse.ArgumentParser(", "* 60 user_name = get_danmu_user_name(danmu) users_info[user_name][delta_idx] += 1 return users_info def sort_by_stay(users_info): users_stay_info", "type = int, help='delta minute help') args = parser.parse_args() if args.count: count_info(args) elif", "= len(users_count_info))) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Get User info from", "user count: {count}'.format(count = len(users_count_info))) # sort count sorted_count_info = sorted(users_count_info.items(), key =", "def get_danmu_time(line): res = re.findall(r'p=\"(\\d+)\\.', line) if res: return int(res[0]) return -1 def", "return sorted_list def print_sorted_info(users_info): pass def users_info(args): if not args.delta: delta = 30", "users_count_info += add_count_info # user count print('Total user count: {count}'.format(count = len(users_count_info))) if", "= get_danmu_time(danmu) if danmu_time >= danmu_delta_max: delta_idx += 1 danmu_delta_max = (delta_idx +", "1 danmu_delta_max = (delta_idx + 1) * danmu_delta_minute * 60 user_name = get_danmu_user_name(danmu)", "for f in args.count: if os.path.exists(f): add_count_info = Counter(get_users_count_info(f)) users_count_info += add_count_info #", "users_stay_info[user] = stay_count sorted_list = sorted(users_stay_info.items(), key = lambda x: x[1], reverse=True) return", "50 delta_max = len(list(users_info.items())[0][1]) print('user stay top 50(max {max}):'.format(max = delta_max)) for i", "from collections import Counter from tqdm import tqdm def get_danmu_user_name(line): res = re.findall(r',\\d{1,2},([^,]+),[^,]*\">',", "[0] * delta_idx_total for danmu in danmu_list: danmu_time = get_danmu_time(danmu) if danmu_time >=", "dm_count == 0: stay_count += 1 users_stay_info[user] = stay_count sorted_list = sorted(users_stay_info.items(), key", "print_sorted_info(users_info): pass def users_info(args): if not args.delta: delta = 30 else: delta =", "= lambda x: x[1], 
reverse=True) return sorted_list def sort_by_count(users_info): users_count_info = {} for", "danmu_dirs = os.listdir(danmu_dir) danmu_file_list = [] for dm_dir in danmu_dirs: danmu_file_dir = os.path.join(danmu_dir,", "parser.add_argument('-d', '--delta', type = int, help='delta minute help') args = parser.parse_args() if args.count:", "sorted_count_info[i][0] v = sorted_count_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i + 1, key = key,", "(user_name in users_count_info): users_count_info[user_name] = 0 else: users_count_info[user_name] += 1 return users_count_info def", "= sorted_count_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i + 1, key = key, value =", "count_info(args): users_count_info = Counter({}) for f in args.count: if os.path.exists(f): add_count_info = Counter(get_users_count_info(f))", "[] with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file: for line in dm_file: if", "users_info[user_name] = [0] * delta_idx_total for danmu in danmu_list: danmu_time = get_danmu_time(danmu) if", "== 0: stay_count += dm_count users_count_info[user] = stay_count sorted_list = sorted(users_count_info.items(), key =", "print('{idx}\\t{key}: {value}'.format(idx = i, key = key, value = v)) def get_users_count_info(danmu_file): users_count_info", "+ 1, key = key, value = v)) # user stay top 50", "stay_count += 1 users_stay_info[user] = stay_count sorted_list = sorted(users_stay_info.items(), key = lambda x:", "50(max {max}):'.format(max = delta_max)) for i in range(50): key = sorted_stay_info[i][0] v =", "import argparse from collections import Counter from tqdm import tqdm def get_danmu_user_name(line): res", "dm_count == 0: stay_count += dm_count users_count_info[user] = stay_count sorted_list = sorted(users_count_info.items(), key", "os.listdir(danmu_file_dir) for danmu_file in danmu_files: dm_file = os.path.join(danmu_file_dir, danmu_file) danmu_file_list.append(dm_file) for dm_file in", "get_users_info(args.file, delta) 
sorted_stay_info = sort_by_stay(users_info) sorted_count_info = sort_by_count(users_info) # user count print('Total user", "Counter({}) danmu_dir = os.path.join(os.getcwd(), args.countall[0]) danmu_dirs = os.listdir(danmu_dir) danmu_file_list = [] for dm_dir", "v)) # user stay top 50 delta_max = len(list(users_info.items())[0][1]) print('user stay top 50(max", "/ (danmu_delta_minute * 60)) + 1 for danmu in danmu_list: user_name = get_danmu_user_name(danmu)", "danmu_file) danmu_file_list.append(dm_file) for dm_file in tqdm(danmu_file_list): add_count_info = Counter(get_users_count_info(dm_file)) users_count_info += add_count_info #", "users_info def sort_by_stay(users_info): users_stay_info = {} for user in users_info: stay_count = 0", "line): danmu_list.append(line) delta_idx = 0 danmu_delta_max = (delta_idx + 1) * danmu_delta_minute *", "idx + 1, key = key, value = v)) idx += 1 def", "sorted(users_stay_info.items(), key = lambda x: x[1], reverse=True) return sorted_list def sort_by_count(users_info): users_count_info =", "key = lambda x: x[1], reverse=True) return sorted_list def sort_by_count(users_info): users_count_info = {}", "= '+', help='count help') parser.add_argument('-ca', '--countall', type = str, nargs = '+', help='count", "= '+', help='count help') parser.add_argument('-f', '--file', type = str, help='danmaku help') parser.add_argument('-d', '--delta',", "= len(users_count_info))) # sort count sorted_count_info = sorted(users_count_info.items(), key = lambda x: x[1],", "user danmu count top 50 print('user danmu count top 50:') for i in", "0 for dm_count in users_info[user]: if not dm_count == 0: stay_count += dm_count", "v)) with open('counts.txt', 'w', encoding='utf-8') as count_file: idx = 0 for info in", "parser.add_argument('-f', '--file', type = str, help='danmaku help') parser.add_argument('-d', '--delta', type = int, help='delta", "{} for user in users_info: stay_count = 0 for dm_count in users_info[user]: if", "danmu_dirs: danmu_file_dir = 
os.path.join(danmu_dir, dm_dir) danmu_files = os.listdir(danmu_file_dir) for danmu_file in danmu_files: dm_file", "count sorted_count_info = sorted(users_count_info.items(), key = lambda x: x[1], reverse=True) for i in", "= 30 else: delta = args.delta print(args.file) users_info = get_users_info(args.file, delta) sorted_stay_info =", "users_count_info += add_count_info # user count print('Total user count: {count}'.format(count = len(users_count_info))) #", "= get_users_info(args.file, delta) sorted_stay_info = sort_by_stay(users_info) sorted_count_info = sort_by_count(users_info) # user count print('Total", "+ 1) * danmu_delta_minute * 60 user_name = get_danmu_user_name(danmu) users_info[user_name][delta_idx] += 1 return", "= sorted_count_info[i][0] v = sorted_count_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i + 1, key =", "in users_count_info): users_count_info[user_name] = 0 else: users_count_info[user_name] += 1 return users_count_info def count_info(args):", "users_count_info[user_name] += 1 return users_count_info def count_info(args): users_count_info = Counter({}) for f in", "in danmu_files: dm_file = os.path.join(danmu_file_dir, danmu_file) danmu_file_list.append(dm_file) for dm_file in tqdm(danmu_file_list): add_count_info =", "= info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx = idx + 1, key = key, value", "= (delta_idx + 1) * danmu_delta_minute * 60 user_name = get_danmu_user_name(danmu) users_info[user_name][delta_idx] +=", "__name__ == '__main__': parser = argparse.ArgumentParser( description='Get User info from danmaku file.') parser.add_argument('-c',", "reverse=True) return sorted_list def print_sorted_info(users_info): pass def users_info(args): if not args.delta: delta =", "line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) delta_idx = 0 danmu_delta_max = (delta_idx", "as dm_file: for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) 
for danmu in", "user_name = get_danmu_user_name(danmu) if not (user_name in users_count_info): users_count_info[user_name] = 0 else: users_count_info[user_name]", "users_count_info = Counter({}) for f in args.count: if os.path.exists(f): add_count_info = Counter(get_users_count_info(f)) users_count_info", "top 50(max {max}):'.format(max = delta_max)) for i in range(50): key = sorted_stay_info[i][0] v", "os.path.join(danmu_file_dir, danmu_file) danmu_file_list.append(dm_file) for dm_file in tqdm(danmu_file_list): add_count_info = Counter(get_users_count_info(dm_file)) users_count_info += add_count_info", "(delta_idx + 1) * danmu_delta_minute * 60 user_name = get_danmu_user_name(danmu) users_info[user_name][delta_idx] += 1", "range(50): key = sorted_stay_info[i][0] v = sorted_stay_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i, key =", "user count: {count}'.format(count = len(users_count_info))) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Get", "= [0] * delta_idx_total for danmu in danmu_list: danmu_time = get_danmu_time(danmu) if danmu_time", "in danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in users_info): users_info[user_name] = [0]", "users_count_info = {} danmu_list = [] with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file:", "not args.delta: delta = 30 else: delta = args.delta print(args.file) users_info = get_users_info(args.file,", "users_info[user]: if not dm_count == 0: stay_count += dm_count users_count_info[user] = stay_count sorted_list", "help='delta minute help') args = parser.parse_args() if args.count: count_info(args) elif args.file: users_info(args) elif", "= os.listdir(danmu_dir) danmu_file_list = [] for dm_dir in danmu_dirs: danmu_file_dir = os.path.join(danmu_dir, dm_dir)", "sorted_stay_info = sort_by_stay(users_info) sorted_count_info = sort_by_count(users_info) # user count print('Total user count: {count}'.format(count", "type = str, nargs = '+', help='count 
help') parser.add_argument('-ca', '--countall', type = str,", "x: x[1], reverse=True) return sorted_list def print_sorted_info(users_info): pass def users_info(args): if not args.delta:", "+= 1 danmu_delta_max = (delta_idx + 1) * danmu_delta_minute * 60 user_name =", "danmu_list = [] with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file: for line in", "in users_info[user]: if not dm_count == 0: stay_count += 1 users_stay_info[user] = stay_count", "for danmu in danmu_list: danmu_time = get_danmu_time(danmu) if danmu_time >= danmu_delta_max: delta_idx +=", "get_users_count_info(danmu_file): users_count_info = {} danmu_list = [] with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as", "def users_info(args): if not args.delta: delta = 30 else: delta = args.delta print(args.file)", "key = key, value = v)) with open('counts.txt', 'w', encoding='utf-8') as count_file: idx", "in users_info[user]: if not dm_count == 0: stay_count += dm_count users_count_info[user] = stay_count", "sorted_list = sorted(users_count_info.items(), key = lambda x: x[1], reverse=True) return sorted_list def print_sorted_info(users_info):", "key, value = v)) # user stay top 50 delta_max = len(list(users_info.items())[0][1]) print('user", "{count}'.format(count = len(users_info))) # user danmu count top 50 print('user danmu count top", "user count print('Total user count: {count}'.format(count = len(users_info))) # user danmu count top", "= v)) def get_users_count_info(danmu_file): users_count_info = {} danmu_list = [] with open(danmu_file, 'r',", "def get_users_count_info(danmu_file): users_count_info = {} danmu_list = [] with open(danmu_file, 'r', encoding='utf-8', errors='ignore')", "delta) sorted_stay_info = sort_by_stay(users_info) sorted_count_info = sort_by_count(users_info) # user count print('Total user count:", "# user count print('Total user count: {count}'.format(count = len(users_count_info))) # sort count sorted_count_info", "stay_count = 0 for dm_count in 
users_info[user]: if not dm_count == 0: stay_count", "re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name", "add_count_info = Counter(get_users_count_info(dm_file)) users_count_info += add_count_info # user count print('Total user count: {count}'.format(count", "dm_count in users_info[user]: if not dm_count == 0: stay_count += dm_count users_count_info[user] =", "= lambda x: x[1], reverse=True) return sorted_list def print_sorted_info(users_info): pass def users_info(args): if", "key, value = v)) idx += 1 def count_all_info(args): users_count_info = Counter({}) danmu_dir", "sorted(users_count_info.items(), key = lambda x: x[1], reverse=True) for i in range(50): key =", "x: x[1], reverse=True) return sorted_list def sort_by_count(users_info): users_count_info = {} for user in", "add_count_info # user count print('Total user count: {count}'.format(count = len(users_count_info))) # sort count", "sort_by_count(users_info): users_count_info = {} for user in users_info: stay_count = 0 for dm_count", "count: {count}'.format(count = len(users_info))) # user danmu count top 50 print('user danmu count", "delta = 30 else: delta = args.delta print(args.file) users_info = get_users_info(args.file, delta) sorted_stay_info", "for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in users_info): users_info[user_name]", "lambda x: x[1], reverse=True) return sorted_list def print_sorted_info(users_info): pass def users_info(args): if not", "res[0] return '' def get_danmu_time(line): res = re.findall(r'p=\"(\\d+)\\.', line) if res: return int(res[0])", "value = v)) # user stay top 50 delta_max = len(list(users_info.items())[0][1]) print('user stay", "'--count', type = str, nargs = '+', help='count help') parser.add_argument('-ca', '--countall', type =", "{value}'.format(idx = i + 1, key = key, value = v)) with open('counts.txt',", "open(danmu_file, 'r', 
encoding='utf-8', errors='ignore') as dm_file: for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line):", "users_info: stay_count = 0 for dm_count in users_info[user]: if not dm_count == 0:", "= key, value = v)) idx += 1 def count_all_info(args): users_count_info = Counter({})", "dm_file: for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) for danmu in danmu_list:", "help='count help') parser.add_argument('-f', '--file', type = str, help='danmaku help') parser.add_argument('-d', '--delta', type =", "danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in users_info): users_info[user_name] = [0] *", "users_info(args): if not args.delta: delta = 30 else: delta = args.delta print(args.file) users_info", "as dm_file: for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) delta_idx = 0", "line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">', line): danmu_list.append(line) for danmu in danmu_list: user_name =", "* danmu_delta_minute * 60 user_name = get_danmu_user_name(danmu) users_info[user_name][delta_idx] += 1 return users_info def", "value = v)) with open('counts.txt', 'w', encoding='utf-8') as count_file: idx = 0 for", "delta_max = len(list(users_info.items())[0][1]) print('user stay top 50(max {max}):'.format(max = delta_max)) for i in", "-*- import re import os import argparse from collections import Counter from tqdm", "= v)) with open('counts.txt', 'w', encoding='utf-8') as count_file: idx = 0 for info", "user count print('Total user count: {count}'.format(count = len(users_count_info))) if __name__ == '__main__': parser", "i in range(50): key = sorted_count_info[i][0] v = sorted_count_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i", "= argparse.ArgumentParser( description='Get User info from danmaku file.') parser.add_argument('-c', '--count', type = str,", "i, key = key, value = v)) def get_users_count_info(danmu_file): 
users_count_info = {} danmu_list", "danmu count top 50:') for i in range(50): key = sorted_count_info[i][0] v =", "import re import os import argparse from collections import Counter from tqdm import", "open('counts.txt', 'w', encoding='utf-8') as count_file: idx = 0 for info in sorted_count_info: key", "key, value = v)) def get_users_count_info(danmu_file): users_count_info = {} danmu_list = [] with", "users_info[user]: if not dm_count == 0: stay_count += 1 users_stay_info[user] = stay_count sorted_list", "top 50 delta_max = len(list(users_info.items())[0][1]) print('user stay top 50(max {max}):'.format(max = delta_max)) for", "= i + 1, key = key, value = v)) with open('counts.txt', 'w',", "= [] with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file: for line in dm_file:", "{value}\\n'.format( idx = idx + 1, key = key, value = v)) idx", "* danmu_delta_minute * 60 # init all user danmu dict danmu_total_time = get_danmu_time(danmu_list[-1])", "v = sorted_count_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i + 1, key = key, value", "count_all_info(args): users_count_info = Counter({}) danmu_dir = os.path.join(os.getcwd(), args.countall[0]) danmu_dirs = os.listdir(danmu_dir) danmu_file_list =", "v = info[1] count_file.write('{idx}\\t{key}: {value}\\n'.format( idx = idx + 1, key = key,", "dict danmu_total_time = get_danmu_time(danmu_list[-1]) delta_idx_total = int(danmu_total_time / (danmu_delta_minute * 60)) + 1", "def count_all_info(args): users_count_info = Counter({}) danmu_dir = os.path.join(os.getcwd(), args.countall[0]) danmu_dirs = os.listdir(danmu_dir) danmu_file_list", "0: stay_count += 1 users_stay_info[user] = stay_count sorted_list = sorted(users_stay_info.items(), key = lambda", "key = sorted_stay_info[i][0] v = sorted_stay_info[i][1] print('{idx}\\t{key}: {value}'.format(idx = i, key = key,", "= get_danmu_user_name(danmu) if not (user_name in users_count_info): users_count_info[user_name] = 0 else: users_count_info[user_name] 
+=", "def get_users_info(danmu_file, danmu_delta_minute): users_info = {} danmu_list = [] with open(danmu_file, 'r', encoding='utf-8',", "1 for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if not (user_name in users_info):", "(danmu_delta_minute * 60)) + 1 for danmu in danmu_list: user_name = get_danmu_user_name(danmu) if", "with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file: for line in dm_file: if re.findall(r',\\d{1,2},([^,]+?),[^,]*\">',", "print('{idx}\\t{key}: {value}'.format(idx = i + 1, key = key, value = v)) #", "for i in range(50): key = sorted_stay_info[i][0] v = sorted_stay_info[i][1] print('{idx}\\t{key}: {value}'.format(idx =", "+= add_count_info # user count print('Total user count: {count}'.format(count = len(users_count_info))) # sort", "'+', help='count help') parser.add_argument('-ca', '--countall', type = str, nargs = '+', help='count help')", "res: return int(res[0]) return -1 def get_users_info(danmu_file, danmu_delta_minute): users_info = {} danmu_list =", "+= 1 def count_all_info(args): users_count_info = Counter({}) danmu_dir = os.path.join(os.getcwd(), args.countall[0]) danmu_dirs =", "count_file: idx = 0 for info in sorted_count_info: key = info[0] v =", "if not dm_count == 0: stay_count += dm_count users_count_info[user] = stay_count sorted_list =" ]
[ "# initialize lists notes.append(msg.note) note_velocities[msg.note] = [] note_successors[msg.note] = [] note_times[msg.note] = []", "# add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes: {len(notes)}') print(f'Velocity keys: {len(note_velocities.keys())}')", "def train(midi): for i, msg in enumerate(midi): try: if not notes.__contains__(msg.note): # initialize", "for i, msg in enumerate(midi): try: if not notes.__contains__(msg.note): # initialize lists notes.append(msg.note)", "{} note_velocities = {} note_neighbors = {} note_times = {} note_successors = {}", "initialize lists notes.append(msg.note) note_velocities[msg.note] = [] note_successors[msg.note] = [] note_times[msg.note] = [] #", "<gh_stars>0 import random import mido from mido import MidiFile # what things to", "[] note_times[msg.note] = [] # add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes:", "from mido import MidiFile # what things to store? # random choice #", "channel_notes = {} note_velocities = {} note_neighbors = {} note_times = {} note_successors", "= {} note_times = {} note_successors = {} def train(midi): for i, msg", "store? # random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = [] #random.choice(notes) # dictionaries #", "notes.append(msg.note) note_velocities[msg.note] = [] note_successors[msg.note] = [] note_times[msg.note] = [] # add data", "import mido from mido import MidiFile # what things to store? 
# random", "https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = [] #random.choice(notes) # dictionaries # https://www.w3schools.com/python/python_dictionaries.asp # { note :", "# https://www.w3schools.com/python/python_dictionaries.asp # { note : recorded_data[] } channel_notes = {} note_velocities =", "{} def train(midi): for i, msg in enumerate(midi): try: if not notes.__contains__(msg.note): #", "{} note_neighbors = {} note_times = {} note_successors = {} def train(midi): for", "# what things to store? # random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = []", "dictionaries # https://www.w3schools.com/python/python_dictionaries.asp # { note : recorded_data[] } channel_notes = {} note_velocities", "train(midi): for i, msg in enumerate(midi): try: if not notes.__contains__(msg.note): # initialize lists", "mido import MidiFile # what things to store? # random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python", "https://www.w3schools.com/python/python_dictionaries.asp # { note : recorded_data[] } channel_notes = {} note_velocities = {}", "= [] # add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes: {len(notes)}') print(f'Velocity", "import MidiFile # what things to store? # random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes", "random import mido from mido import MidiFile # what things to store? #", "recorded_data[] } channel_notes = {} note_velocities = {} note_neighbors = {} note_times =", ": recorded_data[] } channel_notes = {} note_velocities = {} note_neighbors = {} note_times", "note_velocities = {} note_neighbors = {} note_times = {} note_successors = {} def", "MidiFile # what things to store? 
# random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes =", "# { note : recorded_data[] } channel_notes = {} note_velocities = {} note_neighbors", "to store? # random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = [] #random.choice(notes) # dictionaries", "choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = [] #random.choice(notes) # dictionaries # https://www.w3schools.com/python/python_dictionaries.asp # {", "= {} def train(midi): for i, msg in enumerate(midi): try: if not notes.__contains__(msg.note):", "note_successors = {} def train(midi): for i, msg in enumerate(midi): try: if not", "[] # add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes: {len(notes)}') print(f'Velocity keys:", "= [] note_successors[msg.note] = [] note_times[msg.note] = [] # add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time)", "random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = [] #random.choice(notes) # dictionaries # https://www.w3schools.com/python/python_dictionaries.asp #", "{} note_times = {} note_successors = {} def train(midi): for i, msg in", "note_times[msg.note] = [] # add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes: {len(notes)}')", "note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes: {len(notes)}') print(f'Velocity keys: {len(note_velocities.keys())}') print(f'Time keys: {len(note_times.keys())}') print(f'Successors", "notes.__contains__(msg.note): # initialize lists 
notes.append(msg.note) note_velocities[msg.note] = [] note_successors[msg.note] = [] note_times[msg.note] =", "try: if not notes.__contains__(msg.note): # initialize lists notes.append(msg.note) note_velocities[msg.note] = [] note_successors[msg.note] =", "note_successors[msg.note] = [] note_times[msg.note] = [] # add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except:", "note_velocities[msg.note] = [] note_successors[msg.note] = [] note_times[msg.note] = [] # add data note_velocities[msg.note].append(msg.velocity)", "= [] note_times[msg.note] = [] # add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0", "# dictionaries # https://www.w3schools.com/python/python_dictionaries.asp # { note : recorded_data[] } channel_notes = {}", "enumerate(midi): try: if not notes.__contains__(msg.note): # initialize lists notes.append(msg.note) note_velocities[msg.note] = [] note_successors[msg.note]", "{ note : recorded_data[] } channel_notes = {} note_velocities = {} note_neighbors =", "mido from mido import MidiFile # what things to store? # random choice", "note : recorded_data[] } channel_notes = {} note_velocities = {} note_neighbors = {}", "if not notes.__contains__(msg.note): # initialize lists notes.append(msg.note) note_velocities[msg.note] = [] note_successors[msg.note] = []", "things to store? 
# random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = [] #random.choice(notes) #", "= [] #random.choice(notes) # dictionaries # https://www.w3schools.com/python/python_dictionaries.asp # { note : recorded_data[] }", "note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes: {len(notes)}') print(f'Velocity keys: {len(note_velocities.keys())}') print(f'Time keys: {len(note_times.keys())}')", "in enumerate(midi): try: if not notes.__contains__(msg.note): # initialize lists notes.append(msg.note) note_velocities[msg.note] = []", "[] #random.choice(notes) # dictionaries # https://www.w3schools.com/python/python_dictionaries.asp # { note : recorded_data[] } channel_notes", "notes = [] #random.choice(notes) # dictionaries # https://www.w3schools.com/python/python_dictionaries.asp # { note : recorded_data[]", "= {} note_neighbors = {} note_times = {} note_successors = {} def train(midi):", "data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes: {len(notes)}') print(f'Velocity keys: {len(note_velocities.keys())}') print(f'Time keys:", "i, msg in enumerate(midi): try: if not notes.__contains__(msg.note): # initialize lists notes.append(msg.note) note_velocities[msg.note]", "lists notes.append(msg.note) note_velocities[msg.note] = [] note_successors[msg.note] = [] note_times[msg.note] = [] # add", "[] note_successors[msg.note] = [] note_times[msg.note] = [] # add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note)", "import random import mido from mido import MidiFile # what things to store?", "= {} note_successors = {} def train(midi): for i, msg in enumerate(midi): try:", "{} note_successors = {} def 
train(midi): for i, msg in enumerate(midi): try: if", "= {} note_velocities = {} note_neighbors = {} note_times = {} note_successors =", "# random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = [] #random.choice(notes) # dictionaries # https://www.w3schools.com/python/python_dictionaries.asp", "note_times = {} note_successors = {} def train(midi): for i, msg in enumerate(midi):", "what things to store? # random choice # https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = [] #random.choice(notes)", "msg in enumerate(midi): try: if not notes.__contains__(msg.note): # initialize lists notes.append(msg.note) note_velocities[msg.note] =", "note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes: {len(notes)}') print(f'Velocity keys: {len(note_velocities.keys())}') print(f'Time keys: {len(note_times.keys())}') print(f'Successors keys:", "add data note_velocities[msg.note].append(msg.velocity) note_times[msg.note].append(msg.time) note_successors[msg.note].append(midi[i+1].note) except: i=0 print(f'Notes: {len(notes)}') print(f'Velocity keys: {len(note_velocities.keys())}') print(f'Time", "} channel_notes = {} note_velocities = {} note_neighbors = {} note_times = {}", "not notes.__contains__(msg.note): # initialize lists notes.append(msg.note) note_velocities[msg.note] = [] note_successors[msg.note] = [] note_times[msg.note]", "except: i=0 print(f'Notes: {len(notes)}') print(f'Velocity keys: {len(note_velocities.keys())}') print(f'Time keys: {len(note_times.keys())}') print(f'Successors keys: {len(note_successors.keys())}')", "#random.choice(notes) # dictionaries # https://www.w3schools.com/python/python_dictionaries.asp # { note : recorded_data[] } channel_notes =", "# https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python notes = [] #random.choice(notes) # dictionaries # 
https://www.w3schools.com/python/python_dictionaries.asp # { note", "note_neighbors = {} note_times = {} note_successors = {} def train(midi): for i," ]
[ "os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'kvs.proto') os.system('cd anna && protoc", "relevant protobufs self.compile_proto() # Run the standard PyPi copy install.run(self) # remove the", "anna && protoc -I=../../../../include/proto --python_out=. ' + 'functions.proto') def cleanup(self): os.system('rm anna/kvs_pb2.py') setup(", "def cleanup(self): os.system('rm anna/kvs_pb2.py') setup( name='Anna', version='0.1', packages=['anna', ], license='Apache v2', long_description='Client for", "--python_out=. ' + 'kvs.proto') os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'functions.proto')", "# compile the protobufs os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'kvs.proto')", "' + 'functions.proto') def cleanup(self): os.system('rm anna/kvs_pb2.py') setup( name='Anna', version='0.1', packages=['anna', ], license='Apache", "anna && protoc -I=../../../../include/proto --python_out=. ' + 'kvs.proto') os.system('cd anna && protoc -I=../../../../include/proto", "setuptools.command.install import install class InstallWrapper(install): def run(self): # compile the relevant protobufs self.compile_proto()", "os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'functions.proto') def cleanup(self): os.system('rm anna/kvs_pb2.py')", "cleanup(self): os.system('rm anna/kvs_pb2.py') setup( name='Anna', version='0.1', packages=['anna', ], license='Apache v2', long_description='Client for the", "compile the relevant protobufs self.compile_proto() # Run the standard PyPi copy install.run(self) #", "Run the standard PyPi copy install.run(self) # remove the compiled protobufs self.cleanup() def", "self.compile_proto() # Run the standard PyPi copy install.run(self) # remove the compiled protobufs", "-I=../../../../include/proto --python_out=. ' + 'kvs.proto') os.system('cd anna && protoc -I=../../../../include/proto --python_out=. 
' +", "compile the protobufs os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'kvs.proto') os.system('cd", "class InstallWrapper(install): def run(self): # compile the relevant protobufs self.compile_proto() # Run the", "'kvs.proto') os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'functions.proto') def cleanup(self): os.system('rm", "anna/kvs_pb2.py') setup( name='Anna', version='0.1', packages=['anna', ], license='Apache v2', long_description='Client for the Anna KVS',", "protobufs self.compile_proto() # Run the standard PyPi copy install.run(self) # remove the compiled", "name='Anna', version='0.1', packages=['anna', ], license='Apache v2', long_description='Client for the Anna KVS', install_requires=['zmq', 'protobuf'],", "# remove the compiled protobufs self.cleanup() def compile_proto(self): # compile the protobufs os.system('cd", "protobufs os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'kvs.proto') os.system('cd anna &&", "--python_out=. 
' + 'functions.proto') def cleanup(self): os.system('rm anna/kvs_pb2.py') setup( name='Anna', version='0.1', packages=['anna', ],", "os.system('rm anna/kvs_pb2.py') setup( name='Anna', version='0.1', packages=['anna', ], license='Apache v2', long_description='Client for the Anna", "from setuptools.command.install import install class InstallWrapper(install): def run(self): # compile the relevant protobufs", "def run(self): # compile the relevant protobufs self.compile_proto() # Run the standard PyPi", "self.cleanup() def compile_proto(self): # compile the protobufs os.system('cd anna && protoc -I=../../../../include/proto --python_out=.", "], license='Apache v2', long_description='Client for the Anna KVS', install_requires=['zmq', 'protobuf'], cmdclass={'install': InstallWrapper} )", "setup import os from setuptools.command.install import install class InstallWrapper(install): def run(self): # compile", "compiled protobufs self.cleanup() def compile_proto(self): # compile the protobufs os.system('cd anna && protoc", "+ 'functions.proto') def cleanup(self): os.system('rm anna/kvs_pb2.py') setup( name='Anna', version='0.1', packages=['anna', ], license='Apache v2',", "protobufs self.cleanup() def compile_proto(self): # compile the protobufs os.system('cd anna && protoc -I=../../../../include/proto", "+ 'kvs.proto') os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'functions.proto') def cleanup(self):", "the compiled protobufs self.cleanup() def compile_proto(self): # compile the protobufs os.system('cd anna &&", "'functions.proto') def cleanup(self): os.system('rm anna/kvs_pb2.py') setup( name='Anna', version='0.1', packages=['anna', ], license='Apache v2', long_description='Client", "# Run the standard PyPi copy install.run(self) # remove the compiled protobufs self.cleanup()", "compile_proto(self): # compile the protobufs os.system('cd anna && protoc -I=../../../../include/proto --python_out=. 
' +", "distutils.core import setup import os from setuptools.command.install import install class InstallWrapper(install): def run(self):", "InstallWrapper(install): def run(self): # compile the relevant protobufs self.compile_proto() # Run the standard", "' + 'kvs.proto') os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'functions.proto') def", "version='0.1', packages=['anna', ], license='Apache v2', long_description='Client for the Anna KVS', install_requires=['zmq', 'protobuf'], cmdclass={'install':", "copy install.run(self) # remove the compiled protobufs self.cleanup() def compile_proto(self): # compile the", "the protobufs os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' + 'kvs.proto') os.system('cd anna", "-I=../../../../include/proto --python_out=. ' + 'functions.proto') def cleanup(self): os.system('rm anna/kvs_pb2.py') setup( name='Anna', version='0.1', packages=['anna',", "protoc -I=../../../../include/proto --python_out=. ' + 'functions.proto') def cleanup(self): os.system('rm anna/kvs_pb2.py') setup( name='Anna', version='0.1',", "# compile the relevant protobufs self.compile_proto() # Run the standard PyPi copy install.run(self)", "the standard PyPi copy install.run(self) # remove the compiled protobufs self.cleanup() def compile_proto(self):", "<gh_stars>1000+ from distutils.core import setup import os from setuptools.command.install import install class InstallWrapper(install):", "install.run(self) # remove the compiled protobufs self.cleanup() def compile_proto(self): # compile the protobufs", "protoc -I=../../../../include/proto --python_out=. ' + 'kvs.proto') os.system('cd anna && protoc -I=../../../../include/proto --python_out=. 
'", "standard PyPi copy install.run(self) # remove the compiled protobufs self.cleanup() def compile_proto(self): #", "import install class InstallWrapper(install): def run(self): # compile the relevant protobufs self.compile_proto() #", "install class InstallWrapper(install): def run(self): # compile the relevant protobufs self.compile_proto() # Run", "os from setuptools.command.install import install class InstallWrapper(install): def run(self): # compile the relevant", "def compile_proto(self): # compile the protobufs os.system('cd anna && protoc -I=../../../../include/proto --python_out=. '", "setup( name='Anna', version='0.1', packages=['anna', ], license='Apache v2', long_description='Client for the Anna KVS', install_requires=['zmq',", "PyPi copy install.run(self) # remove the compiled protobufs self.cleanup() def compile_proto(self): # compile", "the relevant protobufs self.compile_proto() # Run the standard PyPi copy install.run(self) # remove", "import setup import os from setuptools.command.install import install class InstallWrapper(install): def run(self): #", "remove the compiled protobufs self.cleanup() def compile_proto(self): # compile the protobufs os.system('cd anna", "&& protoc -I=../../../../include/proto --python_out=. ' + 'kvs.proto') os.system('cd anna && protoc -I=../../../../include/proto --python_out=.", "packages=['anna', ], license='Apache v2', long_description='Client for the Anna KVS', install_requires=['zmq', 'protobuf'], cmdclass={'install': InstallWrapper}", "run(self): # compile the relevant protobufs self.compile_proto() # Run the standard PyPi copy", "import os from setuptools.command.install import install class InstallWrapper(install): def run(self): # compile the", "from distutils.core import setup import os from setuptools.command.install import install class InstallWrapper(install): def", "&& protoc -I=../../../../include/proto --python_out=. 
' + 'functions.proto') def cleanup(self): os.system('rm anna/kvs_pb2.py') setup( name='Anna'," ]
[ "class elif isinstance(error, AbilityForbidden): error = getattr(error, 'original', error) await ctx.send(error) # Non-registered", "-> NotNight @commands.check(check_if_dm) # Correct channel -> NotDMChannel @commands.check(check_if_player_really_dead) # Player dead ->", "Missing argument -> commands.MissingRequiredArgument elif isinstance(error, commands.MissingRequiredArgument): player = BOTCUtils.get_player_from_id(ctx.author.id) msg = player.role.ego_self.emoji", "name = documentation[\"misc\"][\"abilities_cog\"]): \"\"\"BoTC in-game commands cog Learn command - used by ravenkeeper", "ravenkeeper \"\"\" player = BOTCUtils.get_player_from_id(ctx.author.id) await player.role.ego_self.register_learn(player, learned) @learn.error async def learn_error(self, ctx,", "json_file: language = json.load(json_file) error_str = language[\"system\"][\"error\"] with open('botc/game_text.json') as json_file: documentation =", "commands of this cog. Must be a non-fleaved player to use these commands.", "LEARN COMMAND (Ravenkeeper) ---------------------------------------- @commands.command( pass_context = True, name = \"learn\", hidden =", "check_if_dm, RoleCannotUseCommand, \\ check_if_player_really_dead, check_if_can_learn, PlayerParser, AbilityForbidden, \\ NotAPlayer, BOTCUtils, DeadOnlyCommand, NotDawn, NotDMChannel,", "= json.load(json_file) class Learn(commands.Cog, name = documentation[\"misc\"][\"abilities_cog\"]): \"\"\"BoTC in-game commands cog Learn command", "cog. Must be a non-fleaved player to use these commands. 
# Load the shared bot strings and the BoTC game documentation once at
# import time; `error_str` is the generic fallback error message.
with open('botutils/bot_text.json') as fp:
    language = json.load(fp)

error_str = language["system"]["error"]

with open('botc/game_text.json') as fp:
    documentation = json.load(fp)
class Learn(commands.Cog, name = documentation["misc"]["abilities_cog"]):
    """BoTC in-game commands cog
    Learn command - used by ravenkeeper
    """

    def __init__(self, client):
        # discord.py bot client this cog is registered on
        self.client = client

    def cog_check(self, ctx):
        """Check performed on all commands of this cog.
        Must be a non-fleaved player to use these commands.
        """
        return check_if_is_player(ctx)  # Registered non-quit player -> NotAPlayer

    # ---------- LEARN COMMAND (Ravenkeeper) ----------------------------------------

    @commands.command(
        pass_context = True,
        name = "learn",
        hidden = False,
        brief = documentation["doc"]["learn"]["brief"],
        help = documentation["doc"]["learn"]["help"],
        description = documentation["doc"]["learn"]["description"]
    )
    @commands.check(check_if_is_dawn)  # Correct phase -> NotDawn
    @commands.check(check_if_dm)  # Correct channel -> NotDMChannel
    @commands.check(check_if_player_really_dead)  # Player dead -> DeadOnlyCommand
    @commands.check(check_if_can_learn)  # Correct character -> RoleCannotUseCommand
    async def learn(self, ctx, *, learned: PlayerParser()):
        """Learn command
        usage: learn <player> and <player> and...
        characters: ravenkeeper
        """
        # Delegate to the invoker's character object; `learned` was already
        # resolved to player object(s) by PlayerParser.
        player = BOTCUtils.get_player_from_id(ctx.author.id)
        await player.role.ego_self.register_learn(player, learned)

    @learn.error
    async def learn_error(self, ctx, error):
        """Error handler for the learn command.

        Branch order matters: silent returns for expected rejections,
        DM feedback for phase/death violations, and a logged fallback
        for anything unrecognized.
        """
        emoji = documentation["cmd_warnings"]["x_emoji"]
        # Incorrect character -> RoleCannotUseCommand (silently ignored)
        if isinstance(error, RoleCannotUseCommand):
            return
        # If it passed all the checks but raised an error in the character class
        elif isinstance(error, AbilityForbidden):
            # unwrap the original exception if discord.py wrapped it
            error = getattr(error, 'original', error)
            await ctx.send(error)
        # Non-registered or quit player -> NotAPlayer
        elif isinstance(error, NotAPlayer):
            return
        # Incorrect channel -> NotDMChannel
        elif isinstance(error, NotDMChannel):
            return
        # Incorrect argument -> commands.BadArgument
        elif isinstance(error, commands.BadArgument):
            return
        # Incorrect phase -> NotDawn (learn is a dawn-only command)
        elif isinstance(error, NotDawn):
            try:
                await ctx.author.send(documentation["cmd_warnings"]["dawn_only"].format(ctx.author.mention, emoji))
            except discord.Forbidden:
                # user blocks DMs; nothing more we can do
                pass
        # Player not dead -> DeadOnlyCommand
        elif isinstance(error, DeadOnlyCommand):
            try:
                await ctx.author.send(documentation["cmd_warnings"]["dead_only"].format(ctx.author.mention, emoji))
            except discord.Forbidden:
                pass
        # Missing argument -> commands.MissingRequiredArgument
        elif isinstance(error, commands.MissingRequiredArgument):
            # Re-send the character's own usage instruction instead of a
            # generic "missing argument" message.
            player = BOTCUtils.get_player_from_id(ctx.author.id)
            msg = player.role.ego_self.emoji + " " + player.role.ego_self.instruction + " " + player.role.ego_self.action
            try:
                await ctx.author.send(msg)
            except discord.Forbidden:
                pass
        else:
            # Unknown error: notify the channel and log the full traceback.
            try:
                raise error
            except Exception:
                await ctx.send(error_str)
                await botutils.log(botutils.Level.error, traceback.format_exc())
ctx.author.send(documentation[\"cmd_warnings\"][\"dead_only\"].format(ctx.author.mention, emoji)) except discord.Forbidden: pass #", "isinstance(error, commands.MissingRequiredArgument): player = BOTCUtils.get_player_from_id(ctx.author.id) msg = player.role.ego_self.emoji + \" \" + player.role.ego_self.instruction", "channel -> NotDMChannel @commands.check(check_if_player_really_dead) # Player dead -> DeadOnlyCommand @commands.check(check_if_can_learn) # Correct character", "getattr(error, 'original', error) await ctx.send(error) # Non-registered or quit player -> NotAPlayer elif", "a non-fleaved player to use these commands. \"\"\" return check_if_is_player(ctx) # Registered non-quit", "await player.role.ego_self.register_learn(player, learned) @learn.error async def learn_error(self, ctx, error): emoji = documentation[\"cmd_warnings\"][\"x_emoji\"] #", "ravenkeeper \"\"\" def __init__(self, client): self.client = client def cog_check(self, ctx): \"\"\"Check performed", "isinstance(error, NotDMChannel): return # Incorrect argument -> commands.BadArgument elif isinstance(error, commands.BadArgument): return #", "# Incorrect character -> RoleCannotUseCommand if isinstance(error, RoleCannotUseCommand): return # If it passed", "documentation = json.load(json_file) class Learn(commands.Cog, name = documentation[\"misc\"][\"abilities_cog\"]): \"\"\"BoTC in-game commands cog Learn", "it passed all the checks but raised an error in the character class", "AbilityForbidden, \\ NotAPlayer, BOTCUtils, DeadOnlyCommand, NotDawn, NotDMChannel, check_if_is_dawn with open('botutils/bot_text.json') as json_file: language", "checks but raised an error in the character class elif isinstance(error, AbilityForbidden): error", "DeadOnlyCommand): try: await ctx.author.send(documentation[\"cmd_warnings\"][\"dead_only\"].format(ctx.author.mention, emoji)) except discord.Forbidden: pass # Missing argument -> commands.MissingRequiredArgument", "# Incorrect argument -> commands.BadArgument 
elif isinstance(error, commands.BadArgument): return # Incorrect phase ->", "documentation[\"doc\"][\"learn\"][\"help\"], description = documentation[\"doc\"][\"learn\"][\"description\"] ) @commands.check(check_if_is_dawn) # Correct phase -> NotNight @commands.check(check_if_dm) #", "# Correct channel -> NotDMChannel @commands.check(check_if_player_really_dead) # Player dead -> DeadOnlyCommand @commands.check(check_if_can_learn) #", "commands.MissingRequiredArgument): player = BOTCUtils.get_player_from_id(ctx.author.id) msg = player.role.ego_self.emoji + \" \" + player.role.ego_self.instruction +", "documentation[\"misc\"][\"abilities_cog\"]): \"\"\"BoTC in-game commands cog Learn command - used by ravenkeeper \"\"\" def", "as json_file: language = json.load(json_file) error_str = language[\"system\"][\"error\"] with open('botc/game_text.json') as json_file: documentation", "= BOTCUtils.get_player_from_id(ctx.author.id) await player.role.ego_self.register_learn(player, learned) @learn.error async def learn_error(self, ctx, error): emoji =", "import discord import traceback import json from discord.ext import commands from botc import", "an error in the character class elif isinstance(error, AbilityForbidden): error = getattr(error, 'original',", "brief = documentation[\"doc\"][\"learn\"][\"brief\"], help = documentation[\"doc\"][\"learn\"][\"help\"], description = documentation[\"doc\"][\"learn\"][\"description\"] ) @commands.check(check_if_is_dawn) # Correct", "RoleCannotUseCommand, \\ check_if_player_really_dead, check_if_can_learn, PlayerParser, AbilityForbidden, \\ NotAPlayer, BOTCUtils, DeadOnlyCommand, NotDawn, NotDMChannel, check_if_is_dawn", "NotDMChannel @commands.check(check_if_player_really_dead) # Player dead -> DeadOnlyCommand @commands.check(check_if_can_learn) # Correct character -> RoleCannotUseCommand", "botutils import discord import traceback import json from discord.ext import commands from botc", "-> NotDMChannel elif isinstance(error, 
NotDMChannel): return # Incorrect argument -> commands.BadArgument elif isinstance(error,", "cog_check(self, ctx): \"\"\"Check performed on all commands of this cog. Must be a", "self.client = client def cog_check(self, ctx): \"\"\"Check performed on all commands of this", "If it passed all the checks but raised an error in the character", "cog Learn command - used by ravenkeeper \"\"\" def __init__(self, client): self.client =", "the checks but raised an error in the character class elif isinstance(error, AbilityForbidden):", "try: await ctx.author.send(msg) except discord.Forbidden: pass else: try: raise error except Exception: await", "= client def cog_check(self, ctx): \"\"\"Check performed on all commands of this cog.", "usage: learn <player> and <player> and... characters: ravenkeeper \"\"\" player = BOTCUtils.get_player_from_id(ctx.author.id) await", "'original', error) await ctx.send(error) # Non-registered or quit player -> NotAPlayer elif isinstance(error,", "performed on all commands of this cog. Must be a non-fleaved player to", "emoji)) except discord.Forbidden: pass # Player not dead -> DeadOnlyCommand elif isinstance(error, DeadOnlyCommand):", "the character class elif isinstance(error, AbilityForbidden): error = getattr(error, 'original', error) await ctx.send(error)", "\"learn\", hidden = False, brief = documentation[\"doc\"][\"learn\"][\"brief\"], help = documentation[\"doc\"][\"learn\"][\"help\"], description = documentation[\"doc\"][\"learn\"][\"description\"]", "msg = player.role.ego_self.emoji + \" \" + player.role.ego_self.instruction + \" \" + player.role.ego_self.action", "with open('botc/game_text.json') as json_file: documentation = json.load(json_file) class Learn(commands.Cog, name = documentation[\"misc\"][\"abilities_cog\"]): \"\"\"BoTC", "command usage: learn <player> and <player> and... 
characters: ravenkeeper \"\"\" player = BOTCUtils.get_player_from_id(ctx.author.id)", "player -> NotAPlayer # ---------- LEARN COMMAND (Ravenkeeper) ---------------------------------------- @commands.command( pass_context = True,", "@commands.check(check_if_player_really_dead) # Player dead -> DeadOnlyCommand @commands.check(check_if_can_learn) # Correct character -> RoleCannotUseCommand async", "json from discord.ext import commands from botc import check_if_is_player, check_if_is_night, check_if_dm, RoleCannotUseCommand, \\", "Learn command - used by ravenkeeper \"\"\" def __init__(self, client): self.client = client", "Incorrect character -> RoleCannotUseCommand if isinstance(error, RoleCannotUseCommand): return # If it passed all", "elif isinstance(error, NotDMChannel): return # Incorrect argument -> commands.BadArgument elif isinstance(error, commands.BadArgument): return", "traceback import json from discord.ext import commands from botc import check_if_is_player, check_if_is_night, check_if_dm,", "= True, name = \"learn\", hidden = False, brief = documentation[\"doc\"][\"learn\"][\"brief\"], help =", "NotAPlayer # ---------- LEARN COMMAND (Ravenkeeper) ---------------------------------------- @commands.command( pass_context = True, name =", "-> commands.MissingRequiredArgument elif isinstance(error, commands.MissingRequiredArgument): player = BOTCUtils.get_player_from_id(ctx.author.id) msg = player.role.ego_self.emoji + \"", "but raised an error in the character class elif isinstance(error, AbilityForbidden): error =", "await ctx.author.send(msg) except discord.Forbidden: pass else: try: raise error except Exception: await ctx.send(error_str)", "@learn.error async def learn_error(self, ctx, error): emoji = documentation[\"cmd_warnings\"][\"x_emoji\"] # Incorrect character ->", "elif isinstance(error, NotDawn): try: await ctx.author.send(documentation[\"cmd_warnings\"][\"dawn_only\"].format(ctx.author.mention, emoji)) except discord.Forbidden: pass # Player 
not" ]
[ "TYPE_INT = 1 def __init__(self, name, field_type, default_value): self.name = name self.field_type =", "default_value): self.name = name self.field_type = field_type self.default_value = default_value class IInteractiveSource(object): \"\"\"", "self.default_value = default_value class IInteractiveSource(object): \"\"\" A content source that a user can", "whose pages can be created by the `chef prepare` command. \"\"\" def setupPrepareParser(self,", "class IInteractiveSource(object): \"\"\" A content source that a user can interact with in", "parser, app): raise NotImplementedError() def createContent(self, args): raise NotImplementedError() class InteractiveField(object): \"\"\" A", "IPreparingSource(object): \"\"\" Defines the interface for a source whose pages can be created", "def setupPrepareParser(self, parser, app): raise NotImplementedError() def createContent(self, args): raise NotImplementedError() class InteractiveField(object):", "= 1 def __init__(self, name, field_type, default_value): self.name = name self.field_type = field_type", "self.name = name self.field_type = field_type self.default_value = default_value class IInteractiveSource(object): \"\"\" A", "Defines the interface for a source whose pages can be created by the", "administration web UI. \"\"\" TYPE_STRING = 0 TYPE_INT = 1 def __init__(self, name,", "\"\"\" A field to display in the administration web UI. \"\"\" TYPE_STRING =", "= name self.field_type = field_type self.default_value = default_value class IInteractiveSource(object): \"\"\" A content", "\"\"\" def setupPrepareParser(self, parser, app): raise NotImplementedError() def createContent(self, args): raise NotImplementedError() class", "source whose pages can be created by the `chef prepare` command. 
\"\"\" def", "raise NotImplementedError() class InteractiveField(object): \"\"\" A field to display in the administration web", "name self.field_type = field_type self.default_value = default_value class IInteractiveSource(object): \"\"\" A content source", "web UI. \"\"\" TYPE_STRING = 0 TYPE_INT = 1 def __init__(self, name, field_type,", "= default_value class IInteractiveSource(object): \"\"\" A content source that a user can interact", "created by the `chef prepare` command. \"\"\" def setupPrepareParser(self, parser, app): raise NotImplementedError()", "command. \"\"\" def setupPrepareParser(self, parser, app): raise NotImplementedError() def createContent(self, args): raise NotImplementedError()", "pages can be created by the `chef prepare` command. \"\"\" def setupPrepareParser(self, parser,", "createContent(self, args): raise NotImplementedError() class InteractiveField(object): \"\"\" A field to display in the", "display in the administration web UI. \"\"\" TYPE_STRING = 0 TYPE_INT = 1", "the `chef prepare` command. \"\"\" def setupPrepareParser(self, parser, app): raise NotImplementedError() def createContent(self,", "0 TYPE_INT = 1 def __init__(self, name, field_type, default_value): self.name = name self.field_type", "be created by the `chef prepare` command. \"\"\" def setupPrepareParser(self, parser, app): raise", "a user can interact with in the administration web UI. \"\"\" def getInteractiveFields(self):", "__init__(self, name, field_type, default_value): self.name = name self.field_type = field_type self.default_value = default_value", "A field to display in the administration web UI. \"\"\" TYPE_STRING = 0", "args): raise NotImplementedError() class InteractiveField(object): \"\"\" A field to display in the administration", "content source that a user can interact with in the administration web UI.", "prepare` command. 
\"\"\" def setupPrepareParser(self, parser, app): raise NotImplementedError() def createContent(self, args): raise", "default_value class IInteractiveSource(object): \"\"\" A content source that a user can interact with", "InteractiveField(object): \"\"\" A field to display in the administration web UI. \"\"\" TYPE_STRING", "IInteractiveSource(object): \"\"\" A content source that a user can interact with in the", "field to display in the administration web UI. \"\"\" TYPE_STRING = 0 TYPE_INT", "NotImplementedError() def createContent(self, args): raise NotImplementedError() class InteractiveField(object): \"\"\" A field to display", "= 0 TYPE_INT = 1 def __init__(self, name, field_type, default_value): self.name = name", "`chef prepare` command. \"\"\" def setupPrepareParser(self, parser, app): raise NotImplementedError() def createContent(self, args):", "for a source whose pages can be created by the `chef prepare` command.", "= field_type self.default_value = default_value class IInteractiveSource(object): \"\"\" A content source that a", "class InteractiveField(object): \"\"\" A field to display in the administration web UI. \"\"\"", "1 def __init__(self, name, field_type, default_value): self.name = name self.field_type = field_type self.default_value", "\"\"\" TYPE_STRING = 0 TYPE_INT = 1 def __init__(self, name, field_type, default_value): self.name", "name, field_type, default_value): self.name = name self.field_type = field_type self.default_value = default_value class", "field_type, default_value): self.name = name self.field_type = field_type self.default_value = default_value class IInteractiveSource(object):", "that a user can interact with in the administration web UI. \"\"\" def", "the interface for a source whose pages can be created by the `chef", "a source whose pages can be created by the `chef prepare` command. 
\"\"\"", "setupPrepareParser(self, parser, app): raise NotImplementedError() def createContent(self, args): raise NotImplementedError() class InteractiveField(object): \"\"\"", "the administration web UI. \"\"\" TYPE_STRING = 0 TYPE_INT = 1 def __init__(self,", "\"\"\" Defines the interface for a source whose pages can be created by", "def __init__(self, name, field_type, default_value): self.name = name self.field_type = field_type self.default_value =", "app): raise NotImplementedError() def createContent(self, args): raise NotImplementedError() class InteractiveField(object): \"\"\" A field", "\"\"\" A content source that a user can interact with in the administration", "source that a user can interact with in the administration web UI. \"\"\"", "can interact with in the administration web UI. \"\"\" def getInteractiveFields(self): raise NotImplementedError()", "UI. \"\"\" TYPE_STRING = 0 TYPE_INT = 1 def __init__(self, name, field_type, default_value):", "user can interact with in the administration web UI. \"\"\" def getInteractiveFields(self): raise", "field_type self.default_value = default_value class IInteractiveSource(object): \"\"\" A content source that a user", "NotImplementedError() class InteractiveField(object): \"\"\" A field to display in the administration web UI.", "self.field_type = field_type self.default_value = default_value class IInteractiveSource(object): \"\"\" A content source that", "raise NotImplementedError() def createContent(self, args): raise NotImplementedError() class InteractiveField(object): \"\"\" A field to", "class IPreparingSource(object): \"\"\" Defines the interface for a source whose pages can be", "can be created by the `chef prepare` command. \"\"\" def setupPrepareParser(self, parser, app):", "def createContent(self, args): raise NotImplementedError() class InteractiveField(object): \"\"\" A field to display in", "to display in the administration web UI. 
\"\"\" TYPE_STRING = 0 TYPE_INT =", "interface for a source whose pages can be created by the `chef prepare`", "in the administration web UI. \"\"\" TYPE_STRING = 0 TYPE_INT = 1 def", "TYPE_STRING = 0 TYPE_INT = 1 def __init__(self, name, field_type, default_value): self.name =", "by the `chef prepare` command. \"\"\" def setupPrepareParser(self, parser, app): raise NotImplementedError() def", "A content source that a user can interact with in the administration web" ]
[ "default for i, aline in enumerate(afile, start=1): if randrange(i) == 0: # random", "from random import randrange from film import film_embed from api import api_call import", "random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f = open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\", errors=\"ignore\") film", "import randrange from film import film_embed from api import api_call import os async", "line = default for i, aline in enumerate(afile, start=1): if randrange(i) == 0:", "\"r\", encoding=\"utf8\", errors=\"ignore\") film = random_line(f) f.close() return await film_embed(film) def random_line(afile, default=None):", "random_line(f) f.close() return await film_embed(film) def random_line(afile, default=None): line = default for i,", "film import film_embed from api import api_call import os async def random_embed(): __location__", "default=None): line = default for i, aline in enumerate(afile, start=1): if randrange(i) ==", "randrange from film import film_embed from api import api_call import os async def", "api_call import os async def random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f = open(os.path.join(__location__,", "import api_call import os async def random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f =", "api import api_call import os async def random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f", "def random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f = open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\", errors=\"ignore\")", "await film_embed(film) def random_line(afile, default=None): line = default for i, aline in enumerate(afile,", "film_embed(film) def random_line(afile, default=None): line = default for i, aline in enumerate(afile, 
start=1):", "start=1): if randrange(i) == 0: # random int [0..i) line = aline return", "film_embed from api import api_call import os async def random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(),", "aline in enumerate(afile, start=1): if randrange(i) == 0: # random int [0..i) line", "random import randrange from film import film_embed from api import api_call import os", "errors=\"ignore\") film = random_line(f) f.close() return await film_embed(film) def random_line(afile, default=None): line =", "enumerate(afile, start=1): if randrange(i) == 0: # random int [0..i) line = aline", "= random_line(f) f.close() return await film_embed(film) def random_line(afile, default=None): line = default for", "in enumerate(afile, start=1): if randrange(i) == 0: # random int [0..i) line =", "async def random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f = open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\",", "os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f = open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\", errors=\"ignore\") film = random_line(f) f.close()", "i, aline in enumerate(afile, start=1): if randrange(i) == 0: # random int [0..i)", "import os async def random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f = open(os.path.join(__location__, \"films.txt\"),", "f.close() return await film_embed(film) def random_line(afile, default=None): line = default for i, aline", "__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f = open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\", errors=\"ignore\") film =", "film = random_line(f) f.close() return await film_embed(film) def random_line(afile, default=None): line = default", "if randrange(i) == 0: # random int [0..i) line = aline return line", "os.path.dirname(__file__))) f = 
open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\", errors=\"ignore\") film = random_line(f) f.close() return", "= open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\", errors=\"ignore\") film = random_line(f) f.close() return await film_embed(film)", "= os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f = open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\", errors=\"ignore\") film = random_line(f)", "= default for i, aline in enumerate(afile, start=1): if randrange(i) == 0: #", "f = open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\", errors=\"ignore\") film = random_line(f) f.close() return await", "return await film_embed(film) def random_line(afile, default=None): line = default for i, aline in", "from api import api_call import os async def random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))", "import film_embed from api import api_call import os async def random_embed(): __location__ =", "\"films.txt\"), \"r\", encoding=\"utf8\", errors=\"ignore\") film = random_line(f) f.close() return await film_embed(film) def random_line(afile,", "from film import film_embed from api import api_call import os async def random_embed():", "def random_line(afile, default=None): line = default for i, aline in enumerate(afile, start=1): if", "encoding=\"utf8\", errors=\"ignore\") film = random_line(f) f.close() return await film_embed(film) def random_line(afile, default=None): line", "for i, aline in enumerate(afile, start=1): if randrange(i) == 0: # random int", "random_line(afile, default=None): line = default for i, aline in enumerate(afile, start=1): if randrange(i)", "os async def random_embed(): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) f = open(os.path.join(__location__, \"films.txt\"), \"r\",", "open(os.path.join(__location__, \"films.txt\"), \"r\", encoding=\"utf8\", 
errors=\"ignore\") film = random_line(f) f.close() return await film_embed(film) def" ]
[ "settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting for {name} to be ready') def _wait_for_object(*args): return _wait_for(lambda:", "= k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj) return k8s_namespace def update_namespace(env, namespace_obj):", "body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def wait_for_deployment(env, namespace, deployment): k8s_apps_api", "get_object_annotations_dict(obj): return obj.metadata.annotations or {} def get_object_name(obj): if isinstance(obj, dict): return obj['metadata']['name'] return", "_object_exists(k8s_core_api, 'read_namespace', None, namespace_obj) def list_namespaces(env): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespace().items def create_namespace(env,", "_get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def deployment_exists(env, namespace, deployment):", "if function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps += 1 if sleeps > settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout", "wait_for_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) def check_deployment(): d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace,", "namespace=namespace, ) return k8s_service def list_deployments(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def", "def get_object_annotations_dict(obj): return obj.metadata.annotations or {} def get_object_name(obj): if isinstance(obj, dict): return obj['metadata']['name']", "namespace=namespace, **args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job) def create_job(env, 
namespace, job, wait_for_completion=True): k8s_batch_api", "k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj) return k8s_namespace def update_namespace(env,", "= _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def deployment_exists(env, namespace,", "wait_for_job(env, namespace, k8s_job) return k8s_job def wait_for_job(env, namespace, job): k8s_batch_api = _get_k8s_batch_api(env) def", "k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api,", "k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return", "get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return True def _get_api_client(env): return config.new_client_from_config(context=env) def _get_k8s_core_api(env): api_client =", "wait_for_completion=True): k8s_batch_api = _get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, ) if wait_for_completion: wait_for_job(env,", "return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service) def create_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service", "not in valid_propagation_policies: raise KubeBuildError(f\"Propagation policy must be one of {valid_propagation_policies}\") args =", "def delete_namespace(env, namespace, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api, 'read_namespace', 
None,", "name=get_object_name(job), namespace=namespace, **args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job) def create_job(env, namespace, job, wait_for_completion=True):", "if is_running(job)] def list_complete_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for job", "if conditions is None: return True complete = any(condition.type == 'Complete' for condition", "= 0 while True: if function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps += 1 if sleeps", "k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace =", "'read_namespaced_deployment', namespace, deployment) def create_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.create_namespaced_deployment(", "= any(condition.type == 'Complete' for condition in job.status.conditions) return not complete def list_running_jobs(env,", "def create_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, )", "def create_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service( body=service, namespace=namespace, )", "k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod) def list_replica_sets(env,", "job in jobs if is_running(job)] def list_complete_jobs(env, namespace): jobs = list_jobs(env, namespace) return", "wait_for_completion: wait_for_job(env, namespace, k8s_job) return k8s_job def wait_for_job(env, namespace, job): k8s_batch_api = _get_k8s_batch_api(env)", "{} def get_object_annotations_dict(obj): return obj.metadata.annotations or {} def get_object_name(obj): if 
isinstance(obj, dict): return", "def delete_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service',", "obj['metadata']['name'] return obj.metadata.name def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return True def _get_api_client(env):", "conditions is None: return True complete = any(condition.type == 'Complete' for condition in", "namespace) return [job for job in jobs if not is_running(job)] valid_propagation_policies = [\"Orphan\",", "_object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def create_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment =", "service): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service) def create_service(env, namespace, service):", "namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def wait_for_deployment(env, namespace, deployment): k8s_apps_api =", "def get_object_labels_dict(obj): return obj.metadata.labels or {} def get_object_annotations_dict(obj): return obj.metadata.annotations or {} def", "deployment): k8s_apps_api = _get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def create_deployment(env, namespace, deployment):", "k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment)", "namespace, replica_set) def list_services(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env, namespace,", 
"import client, config from kubernetes.client.rest import ApiException from kubetools.constants import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions", "namespace, pod): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod)", "not complete def list_running_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for job", "for job in jobs if not is_running(job)] valid_propagation_policies = [\"Orphan\", \"Background\", \"Foreground\"] def", "import sleep from kubernetes import client, config from kubernetes.client.rest import ApiException from kubetools.constants", "name=get_object_name(service), body=service, namespace=namespace, ) return k8s_service def list_deployments(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return", "pod) def list_replica_sets(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace, replica_set):", "def list_running_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for job in jobs", "_object_exists(api, method, namespace, obj): try: if namespace: getattr(api, method)( namespace=namespace, name=get_object_name(obj), ) else:", ") _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job) def create_job(env, namespace, job, wait_for_completion=True): k8s_batch_api = _get_k8s_batch_api(env)", "{name} to be ready') def _wait_for_object(*args): return _wait_for(lambda: _object_exists(*args) is True) def _wait_for_no_object(*args):", "def _wait_for_no_object(*args): return _wait_for(lambda: _object_exists(*args) is False) def namespace_exists(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env)", "namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment( 
name=get_object_name(deployment), body=deployment, namespace=namespace, ) wait_for_deployment(env,", "True def _get_api_client(env): return config.new_client_from_config(context=env) def _get_k8s_core_api(env): api_client = _get_api_client(env) return client.CoreV1Api(api_client=api_client) def", "_get_api_client(env) return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client = _get_api_client(env) return client.BatchV1Api(api_client=api_client) def _object_exists(api, method,", "_wait_for(lambda: _object_exists(*args) is False) def namespace_exists(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace',", "ApiException from kubetools.constants import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import KubeBuildError from kubetools.settings import get_settings", "= _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job) def create_job(env,", ") return k8s_service def list_deployments(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env,", "namespace, service): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service) def create_service(env, namespace,", "list_running_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for job in jobs if", "_get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj) def list_namespaces(env): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespace().items", "body=service, namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service) return k8s_service def update_service(env, namespace, service):", "def get_object_name(obj): if 
isinstance(obj, dict): return obj['metadata']['name'] return obj.metadata.name def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY)", "_get_k8s_batch_api(env): api_client = _get_api_client(env) return client.BatchV1Api(api_client=api_client) def _object_exists(api, method, namespace, obj): try: if", "get_object_name(obj): if isinstance(obj, dict): return obj['metadata']['name'] return obj.metadata.name def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) ==", "_get_api_client(env): return config.new_client_from_config(context=env) def _get_k8s_core_api(env): api_client = _get_api_client(env) return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client", "if propagation_policy and propagation_policy not in valid_propagation_policies: raise KubeBuildError(f\"Propagation policy must be one", "raise KubeBuildError(f'Timeout waiting for {name} to be ready') def _wait_for_object(*args): return _wait_for(lambda: _object_exists(*args)", "k8s_batch_api = _get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, ) if wait_for_completion: wait_for_job(env, namespace,", "create_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace', None,", "= k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) if d.status.ready_replicas == d.status.replicas: return True _wait_for(check_deployment, get_object_name(deployment))", "config.new_client_from_config(context=env) def _get_k8s_core_api(env): api_client = _get_api_client(env) return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client = _get_api_client(env)", "return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace, 
replica_set): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, )", "job, propagation_policy=None): if propagation_policy and propagation_policy not in valid_propagation_policies: raise KubeBuildError(f\"Propagation policy must", "0 while True: if function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps += 1 if sleeps >", "namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service( body=service, namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service',", "k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def update_deployment(env, namespace, deployment):", "body=job, namespace=namespace, ) if wait_for_completion: wait_for_job(env, namespace, k8s_job) return k8s_job def wait_for_job(env, namespace,", "k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def deployment_exists(env, namespace, deployment): k8s_apps_api", "create_job(env, namespace, job, wait_for_completion=True): k8s_batch_api = _get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, )", "'kubetools': return True def _get_api_client(env): return config.new_client_from_config(context=env) def _get_k8s_core_api(env): api_client = _get_api_client(env) return", "return k8s_job def wait_for_job(env, namespace, job): k8s_batch_api = _get_k8s_batch_api(env) def check_job(): j =", "k8s_namespace def delete_namespace(env, namespace, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api, 'read_namespace',", "k8s_batch_api = _get_k8s_batch_api(env) return 
k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions = job.status.conditions if conditions is", "list_complete_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for job in jobs if", "= _get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service( body=service, namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service) return", "sleeps = 0 while True: if function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps += 1 if", "= _get_k8s_apps_api(env) def check_deployment(): d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) if d.status.ready_replicas ==", "'read_namespaced_service', namespace, service) def create_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service(", "_get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, ) return k8s_namespace def delete_namespace(env, namespace, namespace_obj):", "check_deployment(): d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) if d.status.ready_replicas == d.status.replicas: return True", ") _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service) def service_exists(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) return", "= _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service) def service_exists(env, namespace,", "_get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace, replica_set): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), 
namespace=namespace,", "k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service) def create_service(env, namespace, service): k8s_core_api", "namespace, service) return k8s_service def update_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service =", "None, namespace_obj) return k8s_namespace def update_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace(", "from kubernetes import client, config from kubernetes.client.rest import ApiException from kubetools.constants import MANAGED_BY_ANNOTATION_KEY", ") if wait_for_completion: wait_for_job(env, namespace, k8s_job) return k8s_job def wait_for_job(env, namespace, job): k8s_batch_api", "namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def deployment_exists(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env)", "return _wait_for(lambda: _object_exists(*args) is False) def namespace_exists(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api,", "settings = get_settings() sleeps = 0 while True: if function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps", "deployment) def deployment_exists(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment)", "check_job(): j = k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace, ) if j.status.succeeded == j.spec.completions: return True", "def delete_replica_set(env, namespace, replica_set): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set',", "= job.status.conditions if conditions is None: return True complete = any(condition.type == 
'Complete'", "j = k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace, ) if j.status.succeeded == j.spec.completions: return True _wait_for(check_job,", "_wait_for(lambda: _object_exists(*args) is True) def _wait_for_no_object(*args): return _wait_for(lambda: _object_exists(*args) is False) def namespace_exists(env,", ") return k8s_namespace def delete_namespace(env, namespace, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), )", "name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj) def list_pods(env, namespace): k8s_core_api = _get_k8s_core_api(env) return", "namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) def check_deployment(): d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, )", "time import sleep from kubernetes import client, config from kubernetes.client.rest import ApiException from", "def create_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace',", "client.BatchV1Api(api_client=api_client) def _object_exists(api, method, namespace, obj): try: if namespace: getattr(api, method)( namespace=namespace, name=get_object_name(obj),", "namespace=namespace, ) if d.status.ready_replicas == d.status.replicas: return True _wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env, namespace):", "service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace, ) return k8s_service", "deployment_exists(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def create_deployment(env,", "if 
wait_for_completion: wait_for_job(env, namespace, k8s_job) return k8s_job def wait_for_job(env, namespace, job): k8s_batch_api =", "KubeBuildError(f\"Propagation policy must be one of {valid_propagation_policies}\") args = {} if propagation_policy: args['propagation_policy']", "= _get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace, ) return k8s_service def list_deployments(env,", "def deployment_exists(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def", "k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job) def create_job(env, namespace, job,", "namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env, namespace, pod): k8s_core_api = _get_k8s_core_api(env)", "body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj) return k8s_namespace def update_namespace(env, namespace_obj): k8s_core_api =", "def _get_k8s_apps_api(env): api_client = _get_api_client(env) return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client = _get_api_client(env) return", "= k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def update_deployment(env, namespace,", "in jobs if not is_running(job)] valid_propagation_policies = [\"Orphan\", \"Background\", \"Foreground\"] def delete_job(env, namespace,", "= k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace, ) return k8s_service def list_deployments(env, namespace): k8s_apps_api =", "True) def _wait_for_no_object(*args): return 
_wait_for(lambda: _object_exists(*args) is False) def namespace_exists(env, namespace_obj): k8s_core_api =", "namespace_obj): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj) def list_namespaces(env): k8s_core_api =", "= _get_api_client(env) return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client = _get_api_client(env) return client.BatchV1Api(api_client=api_client) def _object_exists(api,", "_get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj) return k8s_namespace def", "valid_propagation_policies = [\"Orphan\", \"Background\", \"Foreground\"] def delete_job(env, namespace, job, propagation_policy=None): if propagation_policy and", "namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def update_deployment(env, namespace, deployment): k8s_apps_api =", "namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service) def service_exists(env, namespace, service): k8s_core_api = _get_k8s_core_api(env)", "False raise return True def _wait_for(function, name='object'): settings = get_settings() sleeps = 0", "'read_namespaced_deployment', namespace, deployment) def deployment_exists(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment',", "= _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return", "_get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod) def list_replica_sets(env, namespace): k8s_apps_api", "job): k8s_batch_api = 
_get_k8s_batch_api(env) def check_job(): j = k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace, ) if", "namespace_obj) return k8s_namespace def update_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj),", "replica_set): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set) def", "is True) def _wait_for_no_object(*args): return _wait_for(lambda: _object_exists(*args) is False) def namespace_exists(env, namespace_obj): k8s_core_api", "namespace_exists(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj) def list_namespaces(env): k8s_core_api", "_get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env, namespace, pod): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace,", "_get_k8s_batch_api(env) return k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions = job.status.conditions if conditions is None: return", "name=get_object_name(replica_set), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set) def list_services(env, namespace): k8s_core_api = _get_k8s_core_api(env)", "_object_exists(k8s_core_api, 'read_namespaced_service', namespace, service) def create_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service =", "namespace, k8s_deployment) return k8s_deployment def update_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment =", "KubeBuildError(f'Timeout waiting for {name} to be ready') def 
_wait_for_object(*args): return _wait_for(lambda: _object_exists(*args) is", "namespace, obj): try: if namespace: getattr(api, method)( namespace=namespace, name=get_object_name(obj), ) else: getattr(api, method)(", "kubetools.exceptions import KubeBuildError from kubetools.settings import get_settings def get_object_labels_dict(obj): return obj.metadata.labels or {}", ") wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def wait_for_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env)", "'read_namespaced_service', namespace, service) def service_exists(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service',", ") _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod) def list_replica_sets(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items", "namespace, service) def service_exists(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service', namespace,", "args['propagation_policy'] = propagation_policy k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job',", "k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, ) return k8s_namespace def delete_namespace(env, namespace, namespace_obj): k8s_core_api = _get_k8s_core_api(env)", "get_settings def get_object_labels_dict(obj): return obj.metadata.labels or {} def get_object_annotations_dict(obj): return obj.metadata.annotations or {}", "k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def update_deployment(env,", "namespace, replica_set): k8s_apps_api = 
_get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set)", "getattr(api, method)( name=get_object_name(obj), ) except ApiException as e: if e.status == 404: return", "k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service) def service_exists(env,", "_get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, ) if wait_for_completion: wait_for_job(env, namespace, k8s_job) return", "body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def update_deployment(env, namespace, deployment): k8s_apps_api", "KubeBuildError from kubetools.settings import get_settings def get_object_labels_dict(obj): return obj.metadata.labels or {} def get_object_annotations_dict(obj):", "is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return True def _get_api_client(env): return config.new_client_from_config(context=env) def _get_k8s_core_api(env):", "= _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj) def list_pods(env, namespace): k8s_core_api", "def is_running(job): conditions = job.status.conditions if conditions is None: return True complete =", "return [job for job in jobs if is_running(job)] def list_complete_jobs(env, namespace): jobs =", "for job in jobs if is_running(job)] def list_complete_jobs(env, namespace): jobs = list_jobs(env, namespace)", "namespace_obj) def list_pods(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items def 
delete_pod(env, namespace, pod):", "ready') def _wait_for_object(*args): return _wait_for(lambda: _object_exists(*args) is True) def _wait_for_no_object(*args): return _wait_for(lambda: _object_exists(*args)", "namespace, deployment) def deployment_exists(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace,", "= _get_api_client(env) return client.BatchV1Api(api_client=api_client) def _object_exists(api, method, namespace, obj): try: if namespace: getattr(api,", "service) def service_exists(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service)", "namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj) def list_pods(env,", "return k8s_deployment def update_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment),", "def delete_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment',", "k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set) def list_services(env,", "return k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, )", "return True def _wait_for(function, name='object'): settings = 
get_settings() sleeps = 0 while True:", "_get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set) def list_services(env, namespace): k8s_core_api", "def list_jobs(env, namespace): k8s_batch_api = _get_k8s_batch_api(env) return k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions = job.status.conditions", "k8s_job) return k8s_job def wait_for_job(env, namespace, job): k8s_batch_api = _get_k8s_batch_api(env) def check_job(): j", "namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace, ) return", "namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace, replica_set): k8s_apps_api = _get_k8s_apps_api(env)", "d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) if d.status.ready_replicas == d.status.replicas: return True _wait_for(check_deployment,", "k8s_job = k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, ) if wait_for_completion: wait_for_job(env, namespace, k8s_job) return k8s_job", "_wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj) return k8s_namespace def update_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace", "'read_namespace', None, namespace_obj) def list_namespaces(env): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj):", "k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env, namespace, pod): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod(", "return 
True complete = any(condition.type == 'Complete' for condition in job.status.conditions) return not", "k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions = job.status.conditions if conditions is None: return True complete", "obj.metadata.annotations or {} def get_object_name(obj): if isinstance(obj, dict): return obj['metadata']['name'] return obj.metadata.name def", "name=get_object_name(obj), ) else: getattr(api, method)( name=get_object_name(obj), ) except ApiException as e: if e.status", "delete_replica_set(env, namespace, replica_set): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace,", "return [job for job in jobs if not is_running(job)] valid_propagation_policies = [\"Orphan\", \"Background\",", "def _wait_for(function, name='object'): settings = get_settings() sleeps = 0 while True: if function():", "= k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, ) if wait_for_completion: wait_for_job(env, namespace, k8s_job) return k8s_job def", "k8s_deployment = k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def", "k8s_deployment) return k8s_deployment def wait_for_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) def check_deployment(): d", "'read_namespaced_service', namespace, service) return k8s_service def update_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service", "namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace,", "def update_service(env, namespace, service): k8s_core_api = 
_get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace,", "not is_running(job)] valid_propagation_policies = [\"Orphan\", \"Background\", \"Foreground\"] def delete_job(env, namespace, job, propagation_policy=None): if", "return client.BatchV1Api(api_client=api_client) def _object_exists(api, method, namespace, obj): try: if namespace: getattr(api, method)( namespace=namespace,", "= k8s_core_api.create_namespaced_service( body=service, namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service) return k8s_service def update_service(env,", "namespace, k8s_job) return k8s_job def wait_for_job(env, namespace, job): k8s_batch_api = _get_k8s_batch_api(env) def check_job():", "return k8s_namespace def delete_namespace(env, namespace, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api,", "client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client = _get_api_client(env) return client.BatchV1Api(api_client=api_client) def _object_exists(api, method, namespace, obj):", "_get_k8s_apps_api(env): api_client = _get_api_client(env) return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client = _get_api_client(env) return client.BatchV1Api(api_client=api_client)", "_get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service( body=service, namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service) return k8s_service", "= get_settings() sleeps = 0 while True: if function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps +=", "return k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env, namespace, pod): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, )", 
"service): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service) def", "wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def wait_for_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) def", "list_pods(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env, namespace, pod): k8s_core_api =", "jobs = list_jobs(env, namespace) return [job for job in jobs if not is_running(job)]", "list_replica_sets(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace, replica_set): k8s_apps_api =", "service) return k8s_service def update_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service(", "args = {} if propagation_policy: args['propagation_policy'] = propagation_policy k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job),", "namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj)", "= _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set) def list_services(env, namespace):", "return sleep(settings.WAIT_SLEEP_TIME) sleeps += 1 if sleeps > settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting for", "'Complete' for condition in job.status.conditions) return not complete def list_running_jobs(env, namespace): jobs =", "job, wait_for_completion=True): 
k8s_batch_api = _get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, ) if wait_for_completion:", "_get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj) def list_pods(env, namespace): k8s_core_api =", "k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api,", "import get_settings def get_object_labels_dict(obj): return obj.metadata.labels or {} def get_object_annotations_dict(obj): return obj.metadata.annotations or", "True def _wait_for(function, name='object'): settings = get_settings() sleeps = 0 while True: if", "return k8s_service def list_deployments(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace,", "return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client = _get_api_client(env) return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client =", "_wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set) def list_services(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items def", "namespace, job): k8s_batch_api = _get_k8s_batch_api(env) def check_job(): j = k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace, )", "_get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment", "be one of {valid_propagation_policies}\") args 
= {} if propagation_policy: args['propagation_policy'] = propagation_policy k8s_batch_api", "delete_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace,", "name='object'): settings = get_settings() sleeps = 0 while True: if function(): return sleep(settings.WAIT_SLEEP_TIME)", "propagation_policy and propagation_policy not in valid_propagation_policies: raise KubeBuildError(f\"Propagation policy must be one of", "from kubetools.constants import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import KubeBuildError from kubetools.settings import get_settings def", "def _object_exists(api, method, namespace, obj): try: if namespace: getattr(api, method)( namespace=namespace, name=get_object_name(obj), )", "is_running(job)] def list_complete_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for job in", "api_client = _get_api_client(env) return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client = _get_api_client(env) return client.AppsV1Api(api_client=api_client) def", "{} if propagation_policy: args['propagation_policy'] = propagation_policy k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args,", "k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace, ) return k8s_service def", "k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service( body=service, namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service)", "k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace, ) return k8s_service def list_deployments(env, 
namespace): k8s_apps_api", "d.status.replicas: return True _wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env, namespace): k8s_batch_api = _get_k8s_batch_api(env) return k8s_batch_api.list_namespaced_job(namespace=namespace).items", "= k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def wait_for_deployment(env,", "else: getattr(api, method)( name=get_object_name(obj), ) except ApiException as e: if e.status == 404:", "import ApiException from kubetools.constants import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import KubeBuildError from kubetools.settings import", "namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def create_deployment(env, namespace,", "def update_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, ) return", "namespace): k8s_batch_api = _get_k8s_batch_api(env) return k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions = job.status.conditions if conditions", "job in jobs if not is_running(job)] valid_propagation_policies = [\"Orphan\", \"Background\", \"Foreground\"] def delete_job(env,", "return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj) def list_namespaces(env): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespace().items def", "\"Background\", \"Foreground\"] def delete_job(env, namespace, job, propagation_policy=None): if propagation_policy and propagation_policy not in", "namespace, deployment) def create_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment,", "job) def 
create_job(env, namespace, job, wait_for_completion=True): k8s_batch_api = _get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job( body=job,", "if namespace: getattr(api, method)( namespace=namespace, name=get_object_name(obj), ) else: getattr(api, method)( name=get_object_name(obj), ) except", "list_jobs(env, namespace) return [job for job in jobs if not is_running(job)] valid_propagation_policies =", "False) def namespace_exists(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj) def", "namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod) def list_replica_sets(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return", "_wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod) def list_replica_sets(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def", "getattr(api, method)( namespace=namespace, name=get_object_name(obj), ) else: getattr(api, method)( name=get_object_name(obj), ) except ApiException as", "complete def list_running_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for job in", "_get_k8s_core_api(env) return k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj,", "def check_job(): j = k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace, ) if j.status.succeeded == j.spec.completions: return", "= _get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def create_deployment(env, namespace, deployment): k8s_apps_api =", "sleep(settings.WAIT_SLEEP_TIME) sleeps += 1 if sleeps > settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting for {name}", "delete_service(env, namespace, 
service): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace,", "_get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace, ) return k8s_service def list_deployments(env, namespace):", "return True _wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env, namespace): k8s_batch_api = _get_k8s_batch_api(env) return k8s_batch_api.list_namespaced_job(namespace=namespace).items def", "namespace=namespace, ) if wait_for_completion: wait_for_job(env, namespace, k8s_job) return k8s_job def wait_for_job(env, namespace, job):", "return not complete def list_running_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for", "= {} if propagation_policy: args['propagation_policy'] = propagation_policy k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace,", "isinstance(obj, dict): return obj['metadata']['name'] return obj.metadata.name def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return", "if e.status == 404: return False raise return True def _wait_for(function, name='object'): settings", "k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) if d.status.ready_replicas == d.status.replicas: return True _wait_for(check_deployment, get_object_name(deployment)) def", "kubetools.settings import get_settings def get_object_labels_dict(obj): return obj.metadata.labels or {} def get_object_annotations_dict(obj): return obj.metadata.annotations", "propagation_policy: args['propagation_policy'] = propagation_policy k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args, 
) _wait_for_no_object(k8s_batch_api,", "1 if sleeps > settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting for {name} to be ready')", "_get_api_client(env) return client.BatchV1Api(api_client=api_client) def _object_exists(api, method, namespace, obj): try: if namespace: getattr(api, method)(", "= _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment),", "namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service)", "def delete_pod(env, namespace, pod): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod',", "k8s_deployment def update_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment,", "def list_services(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env, namespace, service): k8s_core_api", "delete_namespace(env, namespace, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj)", "= _get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj) return k8s_namespace", "k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, ) 
_wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set) def list_services(env, namespace): k8s_core_api =", "None, namespace_obj) def list_pods(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env, namespace,", "return k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj, )", "if propagation_policy: args['propagation_policy'] = propagation_policy k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args, )", "is_running(job)] valid_propagation_policies = [\"Orphan\", \"Background\", \"Foreground\"] def delete_job(env, namespace, job, propagation_policy=None): if propagation_policy", "> settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting for {name} to be ready') def _wait_for_object(*args): return", "k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, ) return k8s_namespace def delete_namespace(env,", "def list_replica_sets(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace, replica_set): k8s_apps_api", "== 'kubetools': return True def _get_api_client(env): return config.new_client_from_config(context=env) def _get_k8s_core_api(env): api_client = _get_api_client(env)", "_wait_for(function, name='object'): settings = get_settings() sleeps = 0 while True: if function(): return", "kubernetes.client.rest import ApiException from kubetools.constants import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import KubeBuildError from kubetools.settings", "def _get_api_client(env): return config.new_client_from_config(context=env) def 
_get_k8s_core_api(env): api_client = _get_api_client(env) return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env):", "name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service) def service_exists(env, namespace, service): k8s_core_api =", "= _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment", "condition in job.status.conditions) return not complete def list_running_jobs(env, namespace): jobs = list_jobs(env, namespace)", "list_jobs(env, namespace) return [job for job in jobs if is_running(job)] def list_complete_jobs(env, namespace):", "body=service, namespace=namespace, ) return k8s_service def list_deployments(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items", "body=namespace_obj, ) return k8s_namespace def delete_namespace(env, namespace, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj),", ") _wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj) def list_pods(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items", "try: if namespace: getattr(api, method)( namespace=namespace, name=get_object_name(obj), ) else: getattr(api, method)( name=get_object_name(obj), )", "wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def update_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment", "_get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( 
name=get_object_name(deployment), namespace=namespace,", "404: return False raise return True def _wait_for(function, name='object'): settings = get_settings() sleeps", "None, namespace_obj) def list_namespaces(env): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj): k8s_core_api", "= _get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service),", "list_namespaces(env): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace", "api_client = _get_api_client(env) return client.BatchV1Api(api_client=api_client) def _object_exists(api, method, namespace, obj): try: if namespace:", "k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def wait_for_deployment(env, namespace,", "= k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, ) return k8s_namespace def delete_namespace(env, namespace, namespace_obj): k8s_core_api =", "== 404: return False raise return True def _wait_for(function, name='object'): settings = get_settings()", "True: if function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps += 1 if sleeps > settings.WAIT_MAX_SLEEPS: raise", "wait_for_job(env, namespace, job): k8s_batch_api = _get_k8s_batch_api(env) def check_job(): j = k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace,", "def _get_k8s_core_api(env): api_client = _get_api_client(env) return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client = _get_api_client(env) return", "= 
k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace, ) if j.status.succeeded == j.spec.completions: return True _wait_for(check_job, get_object_name(job))", "k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod) def list_replica_sets(env, namespace): k8s_apps_api =", "get_settings() sleeps = 0 while True: if function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps += 1", "raise KubeBuildError(f\"Propagation policy must be one of {valid_propagation_policies}\") args = {} if propagation_policy:", "return config.new_client_from_config(context=env) def _get_k8s_core_api(env): api_client = _get_api_client(env) return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client =", "_wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job) def create_job(env, namespace, job, wait_for_completion=True): k8s_batch_api = _get_k8s_batch_api(env) k8s_job", "sleeps += 1 if sleeps > settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting for {name} to", "obj): try: if namespace: getattr(api, method)( namespace=namespace, name=get_object_name(obj), ) else: getattr(api, method)( name=get_object_name(obj),", "_get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def", "def service_exists(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service) def", "= _get_k8s_core_api(env) return k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace(", "namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items def 
delete_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env)", "k8s_apps_api = _get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def create_deployment(env, namespace, deployment): k8s_apps_api", "def namespace_exists(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj) def list_namespaces(env):", "is None: return True complete = any(condition.type == 'Complete' for condition in job.status.conditions)", "return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def create_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment", "raise return True def _wait_for(function, name='object'): settings = get_settings() sleeps = 0 while", "namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, ) return k8s_namespace def", "def update_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment, namespace=namespace,", "delete_job(env, namespace, job, propagation_policy=None): if propagation_policy and propagation_policy not in valid_propagation_policies: raise KubeBuildError(f\"Propagation", "_get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace,", "k8s_service def update_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service,", "policy must be one of {valid_propagation_policies}\") args = {} if 
propagation_policy: args['propagation_policy'] =", "namespace): jobs = list_jobs(env, namespace) return [job for job in jobs if is_running(job)]", ") else: getattr(api, method)( name=get_object_name(obj), ) except ApiException as e: if e.status ==", "namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service) return k8s_service def update_service(env, namespace, service): k8s_core_api", "namespace): jobs = list_jobs(env, namespace) return [job for job in jobs if not", "_get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job) def create_job(env, namespace,", "e.status == 404: return False raise return True def _wait_for(function, name='object'): settings =", "obj.metadata.labels or {} def get_object_annotations_dict(obj): return obj.metadata.annotations or {} def get_object_name(obj): if isinstance(obj,", "as e: if e.status == 404: return False raise return True def _wait_for(function,", "_get_k8s_apps_api(env) def check_deployment(): d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) if d.status.ready_replicas == d.status.replicas:", "job.status.conditions if conditions is None: return True complete = any(condition.type == 'Complete' for", "True complete = any(condition.type == 'Complete' for condition in job.status.conditions) return not complete", "= _get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env, namespace, pod): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod),", "k8s_job def wait_for_job(env, namespace, job): k8s_batch_api = _get_k8s_batch_api(env) def check_job(): j = k8s_batch_api.read_namespaced_job(", "except ApiException as e: if e.status == 404: return False raise return True", "must be one of 
{valid_propagation_policies}\") args = {} if propagation_policy: args['propagation_policy'] = propagation_policy", "_get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service) def create_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env)", "list_deployments(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace, deployment): k8s_apps_api =", "deployment) def create_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace,", "get_object_name(deployment)) def list_jobs(env, namespace): k8s_batch_api = _get_k8s_batch_api(env) return k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions =", "_wait_for_no_object(*args): return _wait_for(lambda: _object_exists(*args) is False) def namespace_exists(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) return", "return True def _get_api_client(env): return config.new_client_from_config(context=env) def _get_k8s_core_api(env): api_client = _get_api_client(env) return client.CoreV1Api(api_client=api_client)", "name=get_object_name(obj), ) except ApiException as e: if e.status == 404: return False raise", "method)( namespace=namespace, name=get_object_name(obj), ) else: getattr(api, method)( name=get_object_name(obj), ) except ApiException as e:", "def _get_k8s_batch_api(env): api_client = _get_api_client(env) return client.BatchV1Api(api_client=api_client) def _object_exists(api, method, namespace, obj): try:", "jobs if not is_running(job)] valid_propagation_policies = [\"Orphan\", \"Background\", \"Foreground\"] def delete_job(env, namespace, job,", "def _wait_for_object(*args): return _wait_for(lambda: _object_exists(*args) is True) def _wait_for_no_object(*args): return _wait_for(lambda: 
_object_exists(*args) is", "jobs = list_jobs(env, namespace) return [job for job in jobs if is_running(job)] def", "= list_jobs(env, namespace) return [job for job in jobs if is_running(job)] def list_complete_jobs(env,", "= _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod) def list_replica_sets(env, namespace):", "def delete_job(env, namespace, job, propagation_policy=None): if propagation_policy and propagation_policy not in valid_propagation_policies: raise", "list_services(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env, namespace, service): k8s_core_api =", "namespace, pod) def list_replica_sets(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace,", "_object_exists(*args) is False) def namespace_exists(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace', None,", ") if d.status.ready_replicas == d.status.replicas: return True _wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env, namespace): k8s_batch_api", "[\"Orphan\", \"Background\", \"Foreground\"] def delete_job(env, namespace, job, propagation_policy=None): if propagation_policy and propagation_policy not", "k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj) return k8s_namespace def update_namespace(env, namespace_obj): k8s_core_api", "while True: if function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps += 1 if sleeps > settings.WAIT_MAX_SLEEPS:", "== 'Complete' for condition in job.status.conditions) return not complete def list_running_jobs(env, namespace): jobs", "import KubeBuildError from 
kubetools.settings import get_settings def get_object_labels_dict(obj): return obj.metadata.labels or {} def", "propagation_policy=None): if propagation_policy and propagation_policy not in valid_propagation_policies: raise KubeBuildError(f\"Propagation policy must be", "k8s_namespace def update_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, )", "config from kubernetes.client.rest import ApiException from kubetools.constants import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import KubeBuildError", "= _get_api_client(env) return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client = _get_api_client(env) return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env):", "k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment(", "k8s_deployment) return k8s_deployment def update_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment(", "+= 1 if sleeps > settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting for {name} to be", "k8s_deployment def wait_for_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) def check_deployment(): d = k8s_apps_api.read_namespaced_deployment(", "k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, ) return k8s_namespace def delete_namespace(env, namespace, namespace_obj): k8s_core_api", "def list_complete_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for job in jobs", "True _wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env, namespace): k8s_batch_api = _get_k8s_batch_api(env) 
return k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job):", "namespace=namespace, name=get_object_name(obj), ) else: getattr(api, method)( name=get_object_name(obj), ) except ApiException as e: if", "k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace, replica_set): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set(", "is False) def namespace_exists(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj)", "service) def create_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service( body=service, namespace=namespace,", "return k8s_service def update_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service),", "dict): return obj['metadata']['name'] return obj.metadata.name def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return True", "list_jobs(env, namespace): k8s_batch_api = _get_k8s_batch_api(env) return k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions = job.status.conditions if", "return k8s_deployment def wait_for_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) def check_deployment(): d =", "k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env, namespace, pod): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api,", "namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) 
_wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment)", "_wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def deployment_exists(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) return _object_exists(k8s_apps_api,", "propagation_policy k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job)", "api_client = _get_api_client(env) return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client = _get_api_client(env) return client.BatchV1Api(api_client=api_client) def", "of {valid_propagation_policies}\") args = {} if propagation_policy: args['propagation_policy'] = propagation_policy k8s_batch_api = _get_k8s_batch_api(env)", "_wait_for_object(*args): return _wait_for(lambda: _object_exists(*args) is True) def _wait_for_no_object(*args): return _wait_for(lambda: _object_exists(*args) is False)", "return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client = _get_api_client(env) return client.BatchV1Api(api_client=api_client) def _object_exists(api, method, namespace,", "create_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service( body=service, namespace=namespace, ) _wait_for_object(k8s_core_api,", "any(condition.type == 'Complete' for condition in job.status.conditions) return not complete def list_running_jobs(env, namespace):", "name=get_object_name(deployment), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def deployment_exists(env, namespace, deployment): k8s_apps_api =", "_wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj) def list_pods(env, namespace): k8s_core_api = _get_k8s_core_api(env) return 
k8s_core_api.list_namespaced_pod(namespace=namespace).items def", "or {} def get_object_name(obj): if isinstance(obj, dict): return obj['metadata']['name'] return obj.metadata.name def is_kubetools_object(obj):", "_wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service) return k8s_service def update_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env)", "== d.status.replicas: return True _wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env, namespace): k8s_batch_api = _get_k8s_batch_api(env) return", ") _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service) return k8s_service def update_service(env, namespace, service): k8s_core_api =", "return obj.metadata.annotations or {} def get_object_name(obj): if isinstance(obj, dict): return obj['metadata']['name'] return obj.metadata.name", "kubetools.constants import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import KubeBuildError from kubetools.settings import get_settings def get_object_labels_dict(obj):", "= _get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, ) return k8s_namespace def delete_namespace(env, namespace,", "\"Foreground\"] def delete_job(env, namespace, job, propagation_policy=None): if propagation_policy and propagation_policy not in valid_propagation_policies:", "namespace, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj) def", "method, namespace, obj): try: if namespace: getattr(api, method)( namespace=namespace, name=get_object_name(obj), ) else: getattr(api,", "from kubernetes.client.rest import ApiException from kubetools.constants import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import KubeBuildError from", "k8s_core_api = _get_k8s_core_api(env) return 
k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_service(", "_get_k8s_batch_api(env) def check_job(): j = k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace, ) if j.status.succeeded == j.spec.completions:", "sleep from kubernetes import client, config from kubernetes.client.rest import ApiException from kubetools.constants import", "namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env)", "= _get_k8s_batch_api(env) def check_job(): j = k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace, ) if j.status.succeeded ==", "delete_pod(env, namespace, pod): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace,", "or {} def get_object_annotations_dict(obj): return obj.metadata.annotations or {} def get_object_name(obj): if isinstance(obj, dict):", "namespace, job, propagation_policy=None): if propagation_policy and propagation_policy not in valid_propagation_policies: raise KubeBuildError(f\"Propagation policy", "def wait_for_job(env, namespace, job): k8s_batch_api = _get_k8s_batch_api(env) def check_job(): j = k8s_batch_api.read_namespaced_job( name=get_object_name(job),", "def list_deployments(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace, deployment): k8s_apps_api", "namespace) return [job for job in jobs if is_running(job)] def list_complete_jobs(env, namespace): jobs", "{valid_propagation_policies}\") args = {} if propagation_policy: args['propagation_policy'] = 
propagation_policy k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job(", "k8s_apps_api = _get_k8s_apps_api(env) def check_deployment(): d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) if d.status.ready_replicas", "'read_namespaced_replica_set', namespace, replica_set) def list_services(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env,", "jobs if is_running(job)] def list_complete_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job for", "client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client = _get_api_client(env) return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client = _get_api_client(env)", ") wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def update_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env)", "[job for job in jobs if not is_running(job)] valid_propagation_policies = [\"Orphan\", \"Background\", \"Foreground\"]", "waiting for {name} to be ready') def _wait_for_object(*args): return _wait_for(lambda: _object_exists(*args) is True)", "update_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj, ) return k8s_namespace", "if d.status.ready_replicas == d.status.replicas: return True _wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env, namespace): k8s_batch_api =", "e: if e.status == 404: return False raise return True def _wait_for(function, name='object'):", "MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import KubeBuildError from kubetools.settings import get_settings def get_object_labels_dict(obj): return obj.metadata.labels", "create_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) 
k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, ) wait_for_deployment(env,", "= _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj) def list_namespaces(env): k8s_core_api = _get_k8s_core_api(env) return", "for condition in job.status.conditions) return not complete def list_running_jobs(env, namespace): jobs = list_jobs(env,", ") _wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj) return k8s_namespace def update_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env)", "for {name} to be ready') def _wait_for_object(*args): return _wait_for(lambda: _object_exists(*args) is True) def", "'read_namespace', None, namespace_obj) return k8s_namespace def update_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_namespace =", "pod): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespaced_pod( name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod) def", "client, config from kubernetes.client.rest import ApiException from kubetools.constants import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import", "k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) _wait_for_no_object(k8s_apps_api,", "= _get_k8s_batch_api(env) return k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions = job.status.conditions if conditions is None:", "conditions = job.status.conditions if conditions is None: return True complete = any(condition.type ==", "{} def get_object_name(obj): if isinstance(obj, dict): return obj['metadata']['name'] return obj.metadata.name def is_kubetools_object(obj): if", "to be ready') def 
_wait_for_object(*args): return _wait_for(lambda: _object_exists(*args) is True) def _wait_for_no_object(*args): return", "namespace, k8s_deployment) return k8s_deployment def wait_for_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) def check_deployment():", "in jobs if is_running(job)] def list_complete_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job", "in valid_propagation_policies: raise KubeBuildError(f\"Propagation policy must be one of {valid_propagation_policies}\") args = {}", "method)( name=get_object_name(obj), ) except ApiException as e: if e.status == 404: return False", "update_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace, )", "service_exists(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service) def create_service(env,", "'read_namespace', None, namespace_obj) def list_pods(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env,", "_get_k8s_apps_api(env) return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def create_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env)", "_wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service) def service_exists(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api,", "k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace, replica_set): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( name=get_object_name(replica_set), namespace=namespace, ) _wait_for_no_object(k8s_apps_api,", "return k8s_namespace def update_namespace(env, namespace_obj): 
k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.patch_namespace( name=get_object_name(namespace_obj), body=namespace_obj,", "is_running(job): conditions = job.status.conditions if conditions is None: return True complete = any(condition.type", "name=get_object_name(namespace_obj), body=namespace_obj, ) return k8s_namespace def delete_namespace(env, namespace, namespace_obj): k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace(", "name=get_object_name(deployment), namespace=namespace, ) if d.status.ready_replicas == d.status.replicas: return True _wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env,", "'read_namespaced_pod', namespace, pod) def list_replica_sets(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env,", "namespace, job, wait_for_completion=True): k8s_batch_api = _get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, ) if", "valid_propagation_policies: raise KubeBuildError(f\"Propagation policy must be one of {valid_propagation_policies}\") args = {} if", "obj.metadata.name def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return True def _get_api_client(env): return config.new_client_from_config(context=env)", "def check_deployment(): d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) if d.status.ready_replicas == d.status.replicas: return", "def create_job(env, namespace, job, wait_for_completion=True): k8s_batch_api = _get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job( body=job, namespace=namespace,", "_get_k8s_core_api(env) k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service) def 
service_exists(env, namespace, service):", "deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def", "return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, )", "k8s_core_api.delete_namespaced_service( name=get_object_name(service), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service) def service_exists(env, namespace, service): k8s_core_api", "complete = any(condition.type == 'Complete' for condition in job.status.conditions) return not complete def", "k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job) def", "return obj.metadata.name def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return True def _get_api_client(env): return", "if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return True def _get_api_client(env): return config.new_client_from_config(context=env) def _get_k8s_core_api(env): api_client", "def wait_for_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) def check_deployment(): d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment),", "def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return True def _get_api_client(env): return config.new_client_from_config(context=env) def", "return False raise return True def _wait_for(function, name='object'): 
settings = get_settings() sleeps =", "_object_exists(*args) is True) def _wait_for_no_object(*args): return _wait_for(lambda: _object_exists(*args) is False) def namespace_exists(env, namespace_obj):", "replica_set) def list_services(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items def delete_service(env, namespace, service):", "and propagation_policy not in valid_propagation_policies: raise KubeBuildError(f\"Propagation policy must be one of {valid_propagation_policies}\")", "def list_namespaces(env): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj): k8s_core_api = _get_k8s_core_api(env)", "update_deployment(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment, namespace=namespace, )", ") _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def deployment_exists(env, namespace, deployment): k8s_apps_api = _get_k8s_apps_api(env) return", "k8s_core_api = _get_k8s_core_api(env) k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) _wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj) def list_pods(env, namespace):", "= [\"Orphan\", \"Background\", \"Foreground\"] def delete_job(env, namespace, job, propagation_policy=None): if propagation_policy and propagation_policy", "k8s_batch_api = _get_k8s_batch_api(env) def check_job(): j = k8s_batch_api.read_namespaced_job( name=get_object_name(job), namespace=namespace, ) if j.status.succeeded", "if sleeps > settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting for {name} to be ready') def", "def list_pods(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_pod(namespace=namespace).items def delete_pod(env, namespace, pod): k8s_core_api", 
"k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, ) if wait_for_completion: wait_for_job(env, namespace, k8s_job) return k8s_job def wait_for_job(env,", "return obj['metadata']['name'] return obj.metadata.name def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools': return True def", "namespace, service) def create_service(env, namespace, service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service( body=service,", "if not is_running(job)] valid_propagation_policies = [\"Orphan\", \"Background\", \"Foreground\"] def delete_job(env, namespace, job, propagation_policy=None):", "namespace: getattr(api, method)( namespace=namespace, name=get_object_name(obj), ) else: getattr(api, method)( name=get_object_name(obj), ) except ApiException", "[job for job in jobs if is_running(job)] def list_complete_jobs(env, namespace): jobs = list_jobs(env,", "**args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job) def create_job(env, namespace, job, wait_for_completion=True): k8s_batch_api =", "return k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions = job.status.conditions if conditions is None: return True", "namespace, job) def create_job(env, namespace, job, wait_for_completion=True): k8s_batch_api = _get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job(", "return obj.metadata.labels or {} def get_object_annotations_dict(obj): return obj.metadata.annotations or {} def get_object_name(obj): if", "k8s_core_api.patch_namespaced_service( name=get_object_name(service), body=service, namespace=namespace, ) return k8s_service def list_deployments(env, namespace): k8s_apps_api = _get_k8s_apps_api(env)", "k8s_core_api.create_namespaced_service( body=service, namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service) return k8s_service def 
update_service(env, namespace,", "_get_k8s_core_api(env): api_client = _get_api_client(env) return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client = _get_api_client(env) return client.AppsV1Api(api_client=api_client)", "be ready') def _wait_for_object(*args): return _wait_for(lambda: _object_exists(*args) is True) def _wait_for_no_object(*args): return _wait_for(lambda:", "namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set) def list_services(env, namespace): k8s_core_api = _get_k8s_core_api(env) return", "name=get_object_name(deployment), body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment) return k8s_deployment def wait_for_deployment(env, namespace, deployment):", "None: return True complete = any(condition.type == 'Complete' for condition in job.status.conditions) return", "kubernetes import client, config from kubernetes.client.rest import ApiException from kubetools.constants import MANAGED_BY_ANNOTATION_KEY from", "from time import sleep from kubernetes import client, config from kubernetes.client.rest import ApiException", ") except ApiException as e: if e.status == 404: return False raise return", "job.status.conditions) return not complete def list_running_jobs(env, namespace): jobs = list_jobs(env, namespace) return [job", "in job.status.conditions) return not complete def list_running_jobs(env, namespace): jobs = list_jobs(env, namespace) return", "import MANAGED_BY_ANNOTATION_KEY from kubetools.exceptions import KubeBuildError from kubetools.settings import get_settings def get_object_labels_dict(obj): return", ") _wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set) def list_services(env, namespace): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespaced_service(namespace=namespace).items", "k8s_core_api.delete_namespace( name=get_object_name(namespace_obj), ) 
_wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj) def list_pods(env, namespace): k8s_core_api = _get_k8s_core_api(env)", "deployment): k8s_apps_api = _get_k8s_apps_api(env) def check_deployment(): d = k8s_apps_api.read_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) if", "k8s_service = k8s_core_api.create_namespaced_service( body=service, namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service) return k8s_service def", "deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.create_namespaced_deployment( body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace, k8s_deployment)", "k8s_core_api = _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj) def list_namespaces(env): k8s_core_api = _get_k8s_core_api(env)", "_wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env, namespace): k8s_batch_api = _get_k8s_batch_api(env) return k8s_batch_api.list_namespaced_job(namespace=namespace).items def is_running(job): conditions", "propagation_policy not in valid_propagation_policies: raise KubeBuildError(f\"Propagation policy must be one of {valid_propagation_policies}\") args", "ApiException as e: if e.status == 404: return False raise return True def", "namespace_obj) def list_namespaces(env): k8s_core_api = _get_k8s_core_api(env) return k8s_core_api.list_namespace().items def create_namespace(env, namespace_obj): k8s_core_api =", "if isinstance(obj, dict): return obj['metadata']['name'] return obj.metadata.name def is_kubetools_object(obj): if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools':", "= _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items def delete_replica_set(env, namespace, replica_set): k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_replica_set( 
name=get_object_name(replica_set),", "service): k8s_core_api = _get_k8s_core_api(env) k8s_service = k8s_core_api.create_namespaced_service( body=service, namespace=namespace, ) _wait_for_object(k8s_core_api, 'read_namespaced_service', namespace,", "= _get_k8s_core_api(env) return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service) def create_service(env, namespace, service): k8s_core_api =", "= list_jobs(env, namespace) return [job for job in jobs if not is_running(job)] valid_propagation_policies", "sleeps > settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting for {name} to be ready') def _wait_for_object(*args):", "k8s_apps_api = _get_k8s_apps_api(env) k8s_apps_api.delete_namespaced_deployment( name=get_object_name(deployment), namespace=namespace, ) _wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment) def deployment_exists(env,", "k8s_core_api = _get_k8s_core_api(env) k8s_namespace = k8s_core_api.create_namespace( body=namespace_obj, ) _wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj) return", "one of {valid_propagation_policies}\") args = {} if propagation_policy: args['propagation_policy'] = propagation_policy k8s_batch_api =", "_get_api_client(env) return client.CoreV1Api(api_client=api_client) def _get_k8s_apps_api(env): api_client = _get_api_client(env) return client.AppsV1Api(api_client=api_client) def _get_k8s_batch_api(env): api_client", "k8s_service def list_deployments(env, namespace): k8s_apps_api = _get_k8s_apps_api(env) return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items def delete_deployment(env, namespace, deployment):", "function(): return sleep(settings.WAIT_SLEEP_TIME) sleeps += 1 if sleeps > settings.WAIT_MAX_SLEEPS: raise KubeBuildError(f'Timeout waiting", "d.status.ready_replicas == d.status.replicas: return True _wait_for(check_deployment, get_object_name(deployment)) def list_jobs(env, namespace): k8s_batch_api = 
_get_k8s_batch_api(env)", "get_object_labels_dict(obj): return obj.metadata.labels or {} def get_object_annotations_dict(obj): return obj.metadata.annotations or {} def get_object_name(obj):", "deployment): k8s_apps_api = _get_k8s_apps_api(env) k8s_deployment = k8s_apps_api.patch_namespaced_deployment( name=get_object_name(deployment), body=deployment, namespace=namespace, ) wait_for_deployment(env, namespace,", "name=get_object_name(pod), namespace=namespace, ) _wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod) def list_replica_sets(env, namespace): k8s_apps_api = _get_k8s_apps_api(env)", "= propagation_policy k8s_batch_api = _get_k8s_batch_api(env) k8s_batch_api.delete_namespaced_job( name=get_object_name(job), namespace=namespace, **args, ) _wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace,", "from kubetools.exceptions import KubeBuildError from kubetools.settings import get_settings def get_object_labels_dict(obj): return obj.metadata.labels or", "return _wait_for(lambda: _object_exists(*args) is True) def _wait_for_no_object(*args): return _wait_for(lambda: _object_exists(*args) is False) def", "'read_namespaced_job', namespace, job) def create_job(env, namespace, job, wait_for_completion=True): k8s_batch_api = _get_k8s_batch_api(env) k8s_job =", "from kubetools.settings import get_settings def get_object_labels_dict(obj): return obj.metadata.labels or {} def get_object_annotations_dict(obj): return", "= _get_k8s_batch_api(env) k8s_job = k8s_batch_api.create_namespaced_job( body=job, namespace=namespace, ) if wait_for_completion: wait_for_job(env, namespace, k8s_job)" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "See the # License for the specific language governing permissions and limitations #", "License. # You may obtain a copy of the License at # #", "the specific language governing permissions and limitations # under the License. from jenkins_jobs.errors", "law or agreed to in writing, software # distributed under the License is", "jenkins_jobs.errors import MissingAttributeError def afs_publisher(parser, xml_parent, data): for attr in ['site', 'source', 'target']:", "compliance with the License. # You may obtain a copy of the License", "express or implied. See the # License for the specific language governing permissions", "this file except in compliance with the License. # You may obtain a", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "CONDITIONS OF ANY KIND, either express or implied. See the # License for", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "data): for attr in ['site', 'source', 'target']: if attr not in data: raise", "import MissingAttributeError def afs_publisher(parser, xml_parent, data): for attr in ['site', 'source', 'target']: if", "License for the specific language governing permissions and limitations # under the License.", "in compliance with the License. 
# You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "2016 Red Hat, Inc # # Licensed under the Apache License, Version 2.0", "use this file except in compliance with the License. # You may obtain", "permissions and limitations # under the License. from jenkins_jobs.errors import MissingAttributeError def afs_publisher(parser,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "xml_parent, data): for attr in ['site', 'source', 'target']: if attr not in data:", "not use this file except in compliance with the License. # You may", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "limitations # under the License. from jenkins_jobs.errors import MissingAttributeError def afs_publisher(parser, xml_parent, data):", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "governing permissions and limitations # under the License. from jenkins_jobs.errors import MissingAttributeError def", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "implied. See the # License for the specific language governing permissions and limitations", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "OF ANY KIND, either express or implied. See the # License for the", "the License. from jenkins_jobs.errors import MissingAttributeError def afs_publisher(parser, xml_parent, data): for attr in", "language governing permissions and limitations # under the License. 
from jenkins_jobs.errors import MissingAttributeError", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "specific language governing permissions and limitations # under the License. from jenkins_jobs.errors import", "2.0 (the \"License\"); # you may not use this file except in compliance", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "# you may not use this file except in compliance with the License.", "agreed to in writing, software # distributed under the License is distributed on", "under the License. from jenkins_jobs.errors import MissingAttributeError def afs_publisher(parser, xml_parent, data): for attr", "KIND, either express or implied. See the # License for the specific language", "(the \"License\"); # you may not use this file except in compliance with", "either express or implied. See the # License for the specific language governing", "Inc # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "# # Unless required by applicable law or agreed to in writing, software", "Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "# under the License. from jenkins_jobs.errors import MissingAttributeError def afs_publisher(parser, xml_parent, data): for", "except in compliance with the License. 
# You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "Copyright 2016 Red Hat, Inc # # Licensed under the Apache License, Version", "def afs_publisher(parser, xml_parent, data): for attr in ['site', 'source', 'target']: if attr not", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "or implied. See the # License for the specific language governing permissions and", "<reponame>cwolferh/project-config # Copyright 2016 Red Hat, Inc # # Licensed under the Apache", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "and limitations # under the License. from jenkins_jobs.errors import MissingAttributeError def afs_publisher(parser, xml_parent,", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "for the specific language governing permissions and limitations # under the License. from", "file except in compliance with the License. # You may obtain a copy", "from jenkins_jobs.errors import MissingAttributeError def afs_publisher(parser, xml_parent, data): for attr in ['site', 'source',", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "ANY KIND, either express or implied. 
See the # License for the specific", "the # License for the specific language governing permissions and limitations # under", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "License. from jenkins_jobs.errors import MissingAttributeError def afs_publisher(parser, xml_parent, data): for attr in ['site',", "the License. # You may obtain a copy of the License at #", "to in writing, software # distributed under the License is distributed on an", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "applicable law or agreed to in writing, software # distributed under the License", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "OR CONDITIONS OF ANY KIND, either express or implied. See the # License", "for attr in ['site', 'source', 'target']: if attr not in data: raise MissingAttributeError(attr)", "or agreed to in writing, software # distributed under the License is distributed", "MissingAttributeError def afs_publisher(parser, xml_parent, data): for attr in ['site', 'source', 'target']: if attr", "# License for the specific language governing permissions and limitations # under the", "afs_publisher(parser, xml_parent, data): for attr in ['site', 'source', 'target']: if attr not in", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "with the License. 
# You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "# Copyright 2016 Red Hat, Inc # # Licensed under the Apache License,", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "Hat, Inc # # Licensed under the Apache License, Version 2.0 (the \"License\");" ]
[ "180 workers = multiprocessing.cpu_count() * 2 + 1 worker_class = 'gthread' # check", "kqueen.config import current_config from prometheus_client import multiprocess import multiprocessing import os app_config =", "+ 1 worker_class = 'gthread' # check for prometheus settings if 'prometheus_multiproc_dir' not", "for prometheus settings if 'prometheus_multiproc_dir' not in os.environ: raise Exception('Variable prometheus_multiproc_dir is required')", "* 2 + 1 worker_class = 'gthread' # check for prometheus settings if", "'prometheus_multiproc_dir' not in os.environ: raise Exception('Variable prometheus_multiproc_dir is required') def child_exit(server, worker): multiprocess.mark_process_dead(worker.pid)", "= current_config() bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout = 180 workers =", "check for prometheus settings if 'prometheus_multiproc_dir' not in os.environ: raise Exception('Variable prometheus_multiproc_dir is", "from kqueen.config import current_config from prometheus_client import multiprocess import multiprocessing import os app_config", "port=app_config.get('KQUEEN_PORT'), ) timeout = 180 workers = multiprocessing.cpu_count() * 2 + 1 worker_class", "os app_config = current_config() bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout = 180", "= multiprocessing.cpu_count() * 2 + 1 worker_class = 'gthread' # check for prometheus", "if 'prometheus_multiproc_dir' not in os.environ: raise Exception('Variable prometheus_multiproc_dir is required') def child_exit(server, worker):", "host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout = 180 workers = multiprocessing.cpu_count() * 2 + 1", "= 'gthread' # check for prometheus settings if 'prometheus_multiproc_dir' not in os.environ: raise", "1 worker_class = 'gthread' # check for prometheus settings if 'prometheus_multiproc_dir' not in", "multiprocess 
import multiprocessing import os app_config = current_config() bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'),", "= 180 workers = multiprocessing.cpu_count() * 2 + 1 worker_class = 'gthread' #", "prometheus settings if 'prometheus_multiproc_dir' not in os.environ: raise Exception('Variable prometheus_multiproc_dir is required') def", "workers = multiprocessing.cpu_count() * 2 + 1 worker_class = 'gthread' # check for", ") timeout = 180 workers = multiprocessing.cpu_count() * 2 + 1 worker_class =", "import os app_config = current_config() bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout =", "multiprocessing import os app_config = current_config() bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout", "'gthread' # check for prometheus settings if 'prometheus_multiproc_dir' not in os.environ: raise Exception('Variable", "app_config = current_config() bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout = 180 workers", "prometheus_client import multiprocess import multiprocessing import os app_config = current_config() bind = \"{host}:{port}\".format(", "<filename>kqueen/gunicorn.py from kqueen.config import current_config from prometheus_client import multiprocess import multiprocessing import os", "import multiprocessing import os app_config = current_config() bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), )", "settings if 'prometheus_multiproc_dir' not in os.environ: raise Exception('Variable prometheus_multiproc_dir is required') def child_exit(server,", "import multiprocess import multiprocessing import os app_config = current_config() bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'),", "current_config from prometheus_client import multiprocess 
import multiprocessing import os app_config = current_config() bind", "worker_class = 'gthread' # check for prometheus settings if 'prometheus_multiproc_dir' not in os.environ:", "= \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout = 180 workers = multiprocessing.cpu_count() * 2", "current_config() bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout = 180 workers = multiprocessing.cpu_count()", "2 + 1 worker_class = 'gthread' # check for prometheus settings if 'prometheus_multiproc_dir'", "timeout = 180 workers = multiprocessing.cpu_count() * 2 + 1 worker_class = 'gthread'", "from prometheus_client import multiprocess import multiprocessing import os app_config = current_config() bind =", "\"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout = 180 workers = multiprocessing.cpu_count() * 2 +", "import current_config from prometheus_client import multiprocess import multiprocessing import os app_config = current_config()", "# check for prometheus settings if 'prometheus_multiproc_dir' not in os.environ: raise Exception('Variable prometheus_multiproc_dir", "bind = \"{host}:{port}\".format( host=app_config.get('KQUEEN_HOST'), port=app_config.get('KQUEEN_PORT'), ) timeout = 180 workers = multiprocessing.cpu_count() *", "multiprocessing.cpu_count() * 2 + 1 worker_class = 'gthread' # check for prometheus settings" ]
[ "key_id = '74G4697BU4' team_id = 'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key, key_id, team_id) results =", "'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key, key_id, team_id) results = am.search('<NAME>', types=['albums'], limit=5) for item", "'} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json()) import applemusicpy secret_key = '' key_id", "json import jwt import cryptography #ploads = {'Authorization': 'Bearer '} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists')", "#print(r.headers) #print(r.text) #print(r.json()) import applemusicpy secret_key = '' key_id = '74G4697BU4' team_id =", "requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json()) import applemusicpy secret_key = '' key_id = '74G4697BU4' team_id", "'74G4697BU4' team_id = 'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key, key_id, team_id) results = am.search('<NAME>', types=['albums'],", "applemusicpy.AppleMusic(secret_key, key_id, team_id) results = am.search('<NAME>', types=['albums'], limit=5) for item in results['results']['albums']['data']: print(item['attributes']['name'])", "team_id = 'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key, key_id, team_id) results = am.search('<NAME>', types=['albums'], limit=5)", "#print(r.text) #print(r.json()) import applemusicpy secret_key = '' key_id = '74G4697BU4' team_id = 'QTM38LJQ3P'", "import requests import json import jwt import cryptography #ploads = {'Authorization': 'Bearer '}", "= applemusicpy.AppleMusic(secret_key, key_id, team_id) results = am.search('<NAME>', types=['albums'], limit=5) for item in results['results']['albums']['data']:", "= 'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key, key_id, team_id) results = am.search('<NAME>', types=['albums'], limit=5) for", "am = applemusicpy.AppleMusic(secret_key, key_id, team_id) results = am.search('<NAME>', types=['albums'], 
limit=5) for item in", "#ploads = {'Authorization': 'Bearer '} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json()) import applemusicpy", "= '74G4697BU4' team_id = 'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key, key_id, team_id) results = am.search('<NAME>',", "jwt import cryptography #ploads = {'Authorization': 'Bearer '} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text)", "#print(r.json()) import applemusicpy secret_key = '' key_id = '74G4697BU4' team_id = 'QTM38LJQ3P' am", "#r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json()) import applemusicpy secret_key = '' key_id =", "cryptography #ploads = {'Authorization': 'Bearer '} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json()) import", "= {'Authorization': 'Bearer '} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json()) import applemusicpy secret_key", "'' key_id = '74G4697BU4' team_id = 'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key, key_id, team_id) results", "requests import json import jwt import cryptography #ploads = {'Authorization': 'Bearer '} #r", "import jwt import cryptography #ploads = {'Authorization': 'Bearer '} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers)", "secret_key = '' key_id = '74G4697BU4' team_id = 'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key, key_id,", "import json import jwt import cryptography #ploads = {'Authorization': 'Bearer '} #r =", "{'Authorization': 'Bearer '} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json()) import applemusicpy secret_key =", "import applemusicpy secret_key = '' key_id = '74G4697BU4' team_id = 
'QTM38LJQ3P' am =", "'Bearer '} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json()) import applemusicpy secret_key = ''", "= '' key_id = '74G4697BU4' team_id = 'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key, key_id, team_id)", "= requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json()) import applemusicpy secret_key = '' key_id = '74G4697BU4'", "applemusicpy secret_key = '' key_id = '74G4697BU4' team_id = 'QTM38LJQ3P' am = applemusicpy.AppleMusic(secret_key,", "import cryptography #ploads = {'Authorization': 'Bearer '} #r = requests.get('https://api.music.apple.com/v1/me/library/playlists') #print(r.headers) #print(r.text) #print(r.json())" ]
[ "value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert value_is_list == \"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\",", "\"id1,id2\" with pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\", value={1, 2, 3}) # noqa assert \"comma-separated\"", "= enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str == \"id1,id2\" value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert", "import conv_type def test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\", value=\"\") assert value_is_none is None value_is_str", "from pytwitter.utils.validators import enf_comma_separated from pytwitter.utils.convertors import conv_type def test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\",", "assert value_is_list == \"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert value_is_tuple == \"id1,id2\"", "value_is_tuple == \"id1,id2\" with pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\", value={1, 2, 3}) # noqa", "None value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str == \"id1,id2\" value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\",", "as ex: enf_comma_separated(name=\"other\", value={1, 2, 3}) # noqa assert \"comma-separated\" in ex.value.message #", "pytwitter.utils.convertors import conv_type def test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\", value=\"\") assert value_is_none is None", "value_is_str == \"id1,id2\" value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert value_is_list == \"id1,id2\" value_is_tuple", "3}) # noqa assert \"comma-separated\" in ex.value.message # noqa def test_conv_type(): with pytest.raises(PyTwitterError)", "value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str == \"id1,id2\" 
value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"])", "<filename>tests/test_utils.py \"\"\" Utils tests \"\"\" import pytest from pytwitter.error import PyTwitterError from pytwitter.utils.validators", "pytwitter.utils.validators import enf_comma_separated from pytwitter.utils.convertors import conv_type def test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\", value=\"\")", "\"\"\" import pytest from pytwitter.error import PyTwitterError from pytwitter.utils.validators import enf_comma_separated from pytwitter.utils.convertors", "pytwitter.error import PyTwitterError from pytwitter.utils.validators import enf_comma_separated from pytwitter.utils.convertors import conv_type def test_comma_separated():", "\"comma-separated\" in ex.value.message # noqa def test_conv_type(): with pytest.raises(PyTwitterError) as e: conv_type(\"limit\", int,", "from pytwitter.utils.convertors import conv_type def test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\", value=\"\") assert value_is_none is", "\"\"\" Utils tests \"\"\" import pytest from pytwitter.error import PyTwitterError from pytwitter.utils.validators import", "PyTwitterError from pytwitter.utils.validators import enf_comma_separated from pytwitter.utils.convertors import conv_type def test_comma_separated(): value_is_none =", "enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str == \"id1,id2\" value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert value_is_list", "\"id2\"]) assert value_is_list == \"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert value_is_tuple ==", "= enf_comma_separated(name=\"none\", value=\"\") assert value_is_none is None value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str", "is None value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str == \"id1,id2\" value_is_list 
= enf_comma_separated(name=\"array\",", "\"id1,id2\" value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert value_is_list == \"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\",", "enf_comma_separated from pytwitter.utils.convertors import conv_type def test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\", value=\"\") assert value_is_none", "= enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert value_is_list == \"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\"))", "enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert value_is_list == \"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert", "ex.value.message # noqa def test_conv_type(): with pytest.raises(PyTwitterError) as e: conv_type(\"limit\", int, None) assert", "import PyTwitterError from pytwitter.utils.validators import enf_comma_separated from pytwitter.utils.convertors import conv_type def test_comma_separated(): value_is_none", "2, 3}) # noqa assert \"comma-separated\" in ex.value.message # noqa def test_conv_type(): with", "== \"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert value_is_tuple == \"id1,id2\" with pytest.raises(PyTwitterError)", "conv_type def test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\", value=\"\") assert value_is_none is None value_is_str =", "pytest from pytwitter.error import PyTwitterError from pytwitter.utils.validators import enf_comma_separated from pytwitter.utils.convertors import conv_type", "# noqa assert \"comma-separated\" in ex.value.message # noqa def test_conv_type(): with pytest.raises(PyTwitterError) as", "ex: enf_comma_separated(name=\"other\", value={1, 2, 3}) # noqa assert \"comma-separated\" in ex.value.message # noqa", "noqa def test_conv_type(): with pytest.raises(PyTwitterError) as e: conv_type(\"limit\", int, 
None) assert \"limit\" in", "value_is_none = enf_comma_separated(name=\"none\", value=\"\") assert value_is_none is None value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\") assert", "assert value_is_none is None value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str == \"id1,id2\" value_is_list", "value=[\"id1\", \"id2\"]) assert value_is_list == \"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert value_is_tuple", "value_is_none is None value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str == \"id1,id2\" value_is_list =", "assert value_is_tuple == \"id1,id2\" with pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\", value={1, 2, 3}) #", "enf_comma_separated(name=\"other\", value={1, 2, 3}) # noqa assert \"comma-separated\" in ex.value.message # noqa def", "tests \"\"\" import pytest from pytwitter.error import PyTwitterError from pytwitter.utils.validators import enf_comma_separated from", "value=\"\") assert value_is_none is None value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str == \"id1,id2\"", "pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\", value={1, 2, 3}) # noqa assert \"comma-separated\" in ex.value.message", "from pytwitter.error import PyTwitterError from pytwitter.utils.validators import enf_comma_separated from pytwitter.utils.convertors import conv_type def", "noqa assert \"comma-separated\" in ex.value.message # noqa def test_conv_type(): with pytest.raises(PyTwitterError) as e:", "# noqa def test_conv_type(): with pytest.raises(PyTwitterError) as e: conv_type(\"limit\", int, None) assert \"limit\"", "enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert value_is_tuple == \"id1,id2\" with pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\", value={1,", "import pytest from pytwitter.error import PyTwitterError 
from pytwitter.utils.validators import enf_comma_separated from pytwitter.utils.convertors import", "assert \"comma-separated\" in ex.value.message # noqa def test_conv_type(): with pytest.raises(PyTwitterError) as e: conv_type(\"limit\",", "value_is_list == \"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert value_is_tuple == \"id1,id2\" with", "test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\", value=\"\") assert value_is_none is None value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\")", "enf_comma_separated(name=\"none\", value=\"\") assert value_is_none is None value_is_str = enf_comma_separated(name=\"str\", value=\"id1,id2\") assert value_is_str ==", "value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert value_is_tuple == \"id1,id2\" with pytest.raises(PyTwitterError) as ex:", "\"id2\")) assert value_is_tuple == \"id1,id2\" with pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\", value={1, 2, 3})", "def test_conv_type(): with pytest.raises(PyTwitterError) as e: conv_type(\"limit\", int, None) assert \"limit\" in e.value.message", "assert value_is_str == \"id1,id2\" value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert value_is_list == \"id1,id2\"", "Utils tests \"\"\" import pytest from pytwitter.error import PyTwitterError from pytwitter.utils.validators import enf_comma_separated", "value={1, 2, 3}) # noqa assert \"comma-separated\" in ex.value.message # noqa def test_conv_type():", "with pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\", value={1, 2, 3}) # noqa assert \"comma-separated\" in", "value=\"id1,id2\") assert value_is_str == \"id1,id2\" value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert value_is_list ==", "= enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert value_is_tuple == \"id1,id2\" with 
pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\",", "\"id1,id2\" value_is_tuple = enf_comma_separated(name=\"tuple\", value=(\"id1\", \"id2\")) assert value_is_tuple == \"id1,id2\" with pytest.raises(PyTwitterError) as", "value=(\"id1\", \"id2\")) assert value_is_tuple == \"id1,id2\" with pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\", value={1, 2,", "== \"id1,id2\" with pytest.raises(PyTwitterError) as ex: enf_comma_separated(name=\"other\", value={1, 2, 3}) # noqa assert", "import enf_comma_separated from pytwitter.utils.convertors import conv_type def test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\", value=\"\") assert", "in ex.value.message # noqa def test_conv_type(): with pytest.raises(PyTwitterError) as e: conv_type(\"limit\", int, None)", "== \"id1,id2\" value_is_list = enf_comma_separated(name=\"array\", value=[\"id1\", \"id2\"]) assert value_is_list == \"id1,id2\" value_is_tuple =", "def test_comma_separated(): value_is_none = enf_comma_separated(name=\"none\", value=\"\") assert value_is_none is None value_is_str = enf_comma_separated(name=\"str\"," ]
[ "timeout self._source_id = None def wrap(func, funcid, args, kwargs): for value in func(*args,", "2014 <NAME> # # This program is free software; you can redistribute it", "routine.pause() del self.__routines[funcid] print_d(\"Removed copool function id %r\" % funcid) def remove_all(self): \"\"\"Stop", "def remove(self, funcid): \"\"\"Stop a registered routine.\"\"\" routine = self._get(funcid) routine.pause() del self.__routines[funcid]", "stop the old one. If no funcid is given, the function itself is", "print_d(\"Removed copool function id %r\" % funcid) def remove_all(self): \"\"\"Stop all running routines.\"\"\"", "= kwargs.pop(\"funcid\", func) if funcid in self.__routines: remove(funcid) priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout", "<reponame>ch1huizong/Scode<filename>stdlib2-src/dist-packages/quodlibet/util/copool.py # Copyright 2006 <NAME>, <NAME> # 2014 <NAME> # # This program", "def add(self, func, *args, **kwargs): \"\"\"Register a routine to run in GLib main", "routine routine.resume() def _get(self, funcid): if funcid in self.__routines: return self.__routines[funcid] raise ValueError(\"no", "kwargs): self.priority = priority self.timeout = timeout self._source_id = None def wrap(func, funcid,", "running at once. 
Starting a new function with the same ID will stop", "pause(self): \"\"\"Pause, if already paused, do nothing\"\"\" if self.paused: return GLib.source_remove(self._source_id) self._source_id =", "registered routine.\"\"\" routine = self._get(funcid) routine.pause() print_d(\"Paused copool function id %r\" % funcid)", "funcid) def remove_all(self): \"\"\"Stop all running routines.\"\"\" for funcid in self.__routines.keys(): self.remove(funcid) def", "funcid) def pause_all(self): \"\"\"Temporarily pause all registered routines.\"\"\" for funcid in self.__routines.keys(): self.pause(funcid)", "if already running do nothing\"\"\" if not self.paused: return if self.timeout: self._source_id =", "a function that returns a Python iterator (e.g. generator) that provides values until", "= self._get(funcid) routine.pause() print_d(\"Paused copool function id %r\" % funcid) def pause_all(self): \"\"\"Temporarily", "values until it should stop being called. Optional Keyword Arguments: priority -- priority", "this function to iterate once.\"\"\" routine = self._get(funcid) return routine.step() # global instance", "func should be a function that returns a Python iterator (e.g. 
generator) that", "def step(self, funcid): \"\"\"Force this function to iterate once.\"\"\" routine = self._get(funcid) return", "already running do nothing\"\"\" if not self.paused: return if self.timeout: self._source_id = GLib.timeout_add(", "pool.remove(funcid) yield False self.source_func = wrap(func, funcid, args, kwargs).next @property def paused(self): \"\"\"If", "return GLib.source_remove(self._source_id) self._source_id = None class CoPool(object): def __init__(self): self.__routines = {} def", "the terms of the GNU General Public License version 2 as # published", "pool, func, funcid, priority, timeout, args, kwargs): self.priority = priority self.timeout = timeout", "do nothing\"\"\" if not self.paused: return if self.timeout: self._source_id = GLib.timeout_add( self.timeout, self.source_func,", "routines.\"\"\" for funcid in self.__routines.keys(): self.pause(funcid) def resume(self, funcid): \"\"\"Resume a paused routine.\"\"\"", "= CoPool() add = _copool.add pause = _copool.pause pause_all = _copool.pause_all remove =", "self._source_id = None class CoPool(object): def __init__(self): self.__routines = {} def add(self, func,", "no funcid is given, the function itself is used. The funcid must be", "as a hash key. \"\"\" funcid = kwargs.pop(\"funcid\", func) if funcid in self.__routines:", "Foundation \"\"\"Manage a pool of routines using Python iterators.\"\"\" from gi.repository import GLib", "running routines.\"\"\" for funcid in self.__routines.keys(): self.remove(funcid) def pause(self, funcid): \"\"\"Temporarily pause a", "same funcid can be running at once. 
Starting a new function with the", "General Public License version 2 as # published by the Free Software Foundation", "routines using Python iterators.\"\"\" from gi.repository import GLib class _Routine(object): def __init__(self, pool,", "instead of idle_add (in milliseconds) Only one function with the same funcid can", "self.remove(funcid) def pause(self, funcid): \"\"\"Temporarily pause a registered routine.\"\"\" routine = self._get(funcid) routine.pause()", "= priority self.timeout = timeout self._source_id = None def wrap(func, funcid, args, kwargs):", "**kwargs): \"\"\"Register a routine to run in GLib main loop. func should be", "\"\"\"Stop all running routines.\"\"\" for funcid in self.__routines.keys(): self.remove(funcid) def pause(self, funcid): \"\"\"Temporarily", "modify # it under the terms of the GNU General Public License version", "-- mutex/removal identifier for this function timeout -- use timeout_add (with given timeout)", "of the GNU General Public License version 2 as # published by the", "if already paused, do nothing\"\"\" if self.paused: return GLib.source_remove(self._source_id) self._source_id = None class", "old one. If no funcid is given, the function itself is used. The", "= {} def add(self, func, *args, **kwargs): \"\"\"Register a routine to run in", "published by the Free Software Foundation \"\"\"Manage a pool of routines using Python", "one function with the same funcid can be running at once. 
Starting a", "function to iterate once.\"\"\" routine = self._get(funcid) return routine.step() # global instance _copool", "Keyword Arguments: priority -- priority to run at (default GLib.PRIORITY_LOW) funcid -- mutex/removal", "def pause(self, funcid): \"\"\"Temporarily pause a registered routine.\"\"\" routine = self._get(funcid) routine.pause() print_d(\"Paused", "self.timeout: self._source_id = GLib.timeout_add( self.timeout, self.source_func, priority=self.priority) else: self._source_id = GLib.idle_add( self.source_func, priority=self.priority)", "the old one. If no funcid is given, the function itself is used.", "already paused, do nothing\"\"\" if self.paused: return GLib.source_remove(self._source_id) self._source_id = None class CoPool(object):", "% funcid) def pause_all(self): \"\"\"Temporarily pause all registered routines.\"\"\" for funcid in self.__routines.keys():", "value in func(*args, **kwargs): yield True pool.remove(funcid) yield False self.source_func = wrap(func, funcid,", "%r\" % funcid) def step(self, funcid): \"\"\"Force this function to iterate once.\"\"\" routine", "= _copool.pause_all remove = _copool.remove remove_all = _copool.remove_all resume = _copool.resume step =", "def pause(self): \"\"\"Pause, if already paused, do nothing\"\"\" if self.paused: return GLib.source_remove(self._source_id) self._source_id", "at once. Starting a new function with the same ID will stop the", "timeout, args, kwargs): self.priority = priority self.timeout = timeout self._source_id = None def", "raise ValueError(\"no pooled routine %r\" % funcid) def remove(self, funcid): \"\"\"Stop a registered", "stop being called. Optional Keyword Arguments: priority -- priority to run at (default", "= self._get(funcid) return routine.step() # global instance _copool = CoPool() add = _copool.add", "*args, **kwargs): \"\"\"Register a routine to run in GLib main loop. 
func should", "registered routines.\"\"\" for funcid in self.__routines.keys(): self.pause(funcid) def resume(self, funcid): \"\"\"Resume a paused", "is None def step(self): \"\"\"Raises StopIteration if the routine has nothing more to", "func) if funcid in self.__routines: remove(funcid) priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\",", "under the terms of the GNU General Public License version 2 as #", "once. Starting a new function with the same ID will stop the old", "def pause_all(self): \"\"\"Temporarily pause all registered routines.\"\"\" for funcid in self.__routines.keys(): self.pause(funcid) def", "routine.resume() def _get(self, funcid): if funcid in self.__routines: return self.__routines[funcid] raise ValueError(\"no pooled", "def resume(self): \"\"\"Resume, if already running do nothing\"\"\" if not self.paused: return if", "def wrap(func, funcid, args, kwargs): for value in func(*args, **kwargs): yield True pool.remove(funcid)", "\"\"\"Resume a paused routine.\"\"\" routine = self._get(funcid) routine.resume() print_d(\"Resumed copool function id %r\"", "%r\" % funcid) def pause_all(self): \"\"\"Temporarily pause all registered routines.\"\"\" for funcid in", "version 2 as # published by the Free Software Foundation \"\"\"Manage a pool", "= _copool.add pause = _copool.pause pause_all = _copool.pause_all remove = _copool.remove remove_all =", "for value in func(*args, **kwargs): yield True pool.remove(funcid) yield False self.source_func = wrap(func,", "copool function id %r\" % funcid) def pause_all(self): \"\"\"Temporarily pause all registered routines.\"\"\"", "None) print_d(\"Added copool function %r with id %r\" % (func, funcid)) routine =", "function with the same funcid can be running at once. 
Starting a new", "not self.paused: return if self.timeout: self._source_id = GLib.timeout_add( self.timeout, self.source_func, priority=self.priority) else: self._source_id", "once.\"\"\" routine = self._get(funcid) return routine.step() # global instance _copool = CoPool() add", "kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\", None) print_d(\"Added copool function %r with id %r\"", "GLib.source_remove(self._source_id) self._source_id = None class CoPool(object): def __init__(self): self.__routines = {} def add(self,", "id %r\" % funcid) def remove_all(self): \"\"\"Stop all running routines.\"\"\" for funcid in", "# published by the Free Software Foundation \"\"\"Manage a pool of routines using", "routine = self._get(funcid) routine.pause() print_d(\"Paused copool function id %r\" % funcid) def pause_all(self):", "will stop the old one. If no funcid is given, the function itself", "a pool of routines using Python iterators.\"\"\" from gi.repository import GLib class _Routine(object):", "usable as a hash key. \"\"\" funcid = kwargs.pop(\"funcid\", func) if funcid in", "= _copool.pause pause_all = _copool.pause_all remove = _copool.remove remove_all = _copool.remove_all resume =", "<NAME> # 2014 <NAME> # # This program is free software; you can", "with the same ID will stop the old one. 
If no funcid is", "print_d(\"Paused copool function id %r\" % funcid) def pause_all(self): \"\"\"Temporarily pause all registered", "nothing more to do\"\"\" return self.source_func() def resume(self): \"\"\"Resume, if already running do", "at (default GLib.PRIORITY_LOW) funcid -- mutex/removal identifier for this function timeout -- use", "GLib class _Routine(object): def __init__(self, pool, func, funcid, priority, timeout, args, kwargs): self.priority", "args, kwargs): self.priority = priority self.timeout = timeout self._source_id = None def wrap(func,", "copool function id %r\" % funcid) def step(self, funcid): \"\"\"Force this function to", "priority=self.priority) def pause(self): \"\"\"Pause, if already paused, do nothing\"\"\" if self.paused: return GLib.source_remove(self._source_id)", "pause a registered routine.\"\"\" routine = self._get(funcid) routine.pause() print_d(\"Paused copool function id %r\"", "(with given timeout) instead of idle_add (in milliseconds) Only one function with the", "provides values until it should stop being called. Optional Keyword Arguments: priority --", "this function timeout -- use timeout_add (with given timeout) instead of idle_add (in", "resume(self, funcid): \"\"\"Resume a paused routine.\"\"\" routine = self._get(funcid) routine.resume() print_d(\"Resumed copool function", "self.__routines[funcid] raise ValueError(\"no pooled routine %r\" % funcid) def remove(self, funcid): \"\"\"Stop a", "registered routine.\"\"\" routine = self._get(funcid) routine.pause() del self.__routines[funcid] print_d(\"Removed copool function id %r\"", "copool function id %r\" % funcid) def remove_all(self): \"\"\"Stop all running routines.\"\"\" for", "del self.__routines[funcid] print_d(\"Removed copool function id %r\" % funcid) def remove_all(self): \"\"\"Stop all", "that provides values until it should stop being called. 
Optional Keyword Arguments: priority", "Python iterators.\"\"\" from gi.repository import GLib class _Routine(object): def __init__(self, pool, func, funcid,", "it and/or modify # it under the terms of the GNU General Public", "the same funcid can be running at once. Starting a new function with", "\"\"\"Temporarily pause all registered routines.\"\"\" for funcid in self.__routines.keys(): self.pause(funcid) def resume(self, funcid):", "def resume(self, funcid): \"\"\"Resume a paused routine.\"\"\" routine = self._get(funcid) routine.resume() print_d(\"Resumed copool", "add = _copool.add pause = _copool.pause pause_all = _copool.pause_all remove = _copool.remove remove_all", "called. Optional Keyword Arguments: priority -- priority to run at (default GLib.PRIORITY_LOW) funcid", "timeout, args, kwargs) self.__routines[funcid] = routine routine.resume() def _get(self, funcid): if funcid in", "given, the function itself is used. The funcid must be usable as a", "pause_all(self): \"\"\"Temporarily pause all registered routines.\"\"\" for funcid in self.__routines.keys(): self.pause(funcid) def resume(self,", "Public License version 2 as # published by the Free Software Foundation \"\"\"Manage", "funcid in self.__routines: return self.__routines[funcid] raise ValueError(\"no pooled routine %r\" % funcid) def", "of routines using Python iterators.\"\"\" from gi.repository import GLib class _Routine(object): def __init__(self,", "\"\"\"Temporarily pause a registered routine.\"\"\" routine = self._get(funcid) routine.pause() print_d(\"Paused copool function id", "in GLib main loop. func should be a function that returns a Python", "be usable as a hash key. 
\"\"\" funcid = kwargs.pop(\"funcid\", func) if funcid", "pause(self, funcid): \"\"\"Temporarily pause a registered routine.\"\"\" routine = self._get(funcid) routine.pause() print_d(\"Paused copool", "a registered routine.\"\"\" routine = self._get(funcid) routine.pause() print_d(\"Paused copool function id %r\" %", "CoPool(object): def __init__(self): self.__routines = {} def add(self, func, *args, **kwargs): \"\"\"Register a", "pool of routines using Python iterators.\"\"\" from gi.repository import GLib class _Routine(object): def", "function id %r\" % funcid) def remove_all(self): \"\"\"Stop all running routines.\"\"\" for funcid", "self.source_func() def resume(self): \"\"\"Resume, if already running do nothing\"\"\" if not self.paused: return", "function %r with id %r\" % (func, funcid)) routine = _Routine(self, func, funcid,", "# global instance _copool = CoPool() add = _copool.add pause = _copool.pause pause_all", "in self.__routines: return self.__routines[funcid] raise ValueError(\"no pooled routine %r\" % funcid) def remove(self,", "class CoPool(object): def __init__(self): self.__routines = {} def add(self, func, *args, **kwargs): \"\"\"Register", "return routine.step() # global instance _copool = CoPool() add = _copool.add pause =", "funcid, args, kwargs): for value in func(*args, **kwargs): yield True pool.remove(funcid) yield False", "def _get(self, funcid): if funcid in self.__routines: return self.__routines[funcid] raise ValueError(\"no pooled routine", "else: self._source_id = GLib.idle_add( self.source_func, priority=self.priority) def pause(self): \"\"\"Pause, if already paused, do", "running do nothing\"\"\" if not self.paused: return if self.timeout: self._source_id = GLib.timeout_add( self.timeout,", "software; you can redistribute it and/or modify # it under the terms of", "funcid in self.__routines.keys(): self.pause(funcid) def resume(self, funcid): \"\"\"Resume a paused routine.\"\"\" routine =", "If no funcid is given, the function 
itself is used. The funcid must", "pause all registered routines.\"\"\" for funcid in self.__routines.keys(): self.pause(funcid) def resume(self, funcid): \"\"\"Resume", "GLib.timeout_add( self.timeout, self.source_func, priority=self.priority) else: self._source_id = GLib.idle_add( self.source_func, priority=self.priority) def pause(self): \"\"\"Pause,", "self._get(funcid) routine.resume() print_d(\"Resumed copool function id %r\" % funcid) def step(self, funcid): \"\"\"Force", "timeout -- use timeout_add (with given timeout) instead of idle_add (in milliseconds) Only", "a registered routine.\"\"\" routine = self._get(funcid) routine.pause() del self.__routines[funcid] print_d(\"Removed copool function id", "all running routines.\"\"\" for funcid in self.__routines.keys(): self.remove(funcid) def pause(self, funcid): \"\"\"Temporarily pause", "to run at (default GLib.PRIORITY_LOW) funcid -- mutex/removal identifier for this function timeout", "self.source_func, priority=self.priority) def pause(self): \"\"\"Pause, if already paused, do nothing\"\"\" if self.paused: return", "print_d(\"Resumed copool function id %r\" % funcid) def step(self, funcid): \"\"\"Force this function", "with id %r\" % (func, funcid)) routine = _Routine(self, func, funcid, priority, timeout,", "function id %r\" % funcid) def step(self, funcid): \"\"\"Force this function to iterate", "funcid): if funcid in self.__routines: return self.__routines[funcid] raise ValueError(\"no pooled routine %r\" %", "# This program is free software; you can redistribute it and/or modify #", "= routine routine.resume() def _get(self, funcid): if funcid in self.__routines: return self.__routines[funcid] raise", "(func, funcid)) routine = _Routine(self, func, funcid, priority, timeout, args, kwargs) self.__routines[funcid] =", "2 as # published by the Free Software Foundation \"\"\"Manage a pool of", "using Python iterators.\"\"\" from gi.repository import GLib class _Routine(object): def __init__(self, pool, 
func,", "the GNU General Public License version 2 as # published by the Free", "funcid -- mutex/removal identifier for this function timeout -- use timeout_add (with given", "Copyright 2006 <NAME>, <NAME> # 2014 <NAME> # # This program is free", "run in GLib main loop. func should be a function that returns a", "None def step(self): \"\"\"Raises StopIteration if the routine has nothing more to do\"\"\"", "a hash key. \"\"\" funcid = kwargs.pop(\"funcid\", func) if funcid in self.__routines: remove(funcid)", "same ID will stop the old one. If no funcid is given, the", "funcid): \"\"\"Resume a paused routine.\"\"\" routine = self._get(funcid) routine.resume() print_d(\"Resumed copool function id", "be a function that returns a Python iterator (e.g. generator) that provides values", "self.source_func = wrap(func, funcid, args, kwargs).next @property def paused(self): \"\"\"If the routine is", "funcid): \"\"\"Force this function to iterate once.\"\"\" routine = self._get(funcid) return routine.step() #", "% funcid) def step(self, funcid): \"\"\"Force this function to iterate once.\"\"\" routine =", "should be a function that returns a Python iterator (e.g. 
generator) that provides", "_get(self, funcid): if funcid in self.__routines: return self.__routines[funcid] raise ValueError(\"no pooled routine %r\"", "self.__routines[funcid] = routine routine.resume() def _get(self, funcid): if funcid in self.__routines: return self.__routines[funcid]", "in self.__routines.keys(): self.remove(funcid) def pause(self, funcid): \"\"\"Temporarily pause a registered routine.\"\"\" routine =", "instance _copool = CoPool() add = _copool.add pause = _copool.pause pause_all = _copool.pause_all", "%r\" % funcid) def remove_all(self): \"\"\"Stop all running routines.\"\"\" for funcid in self.__routines.keys():", "%r\" % (func, funcid)) routine = _Routine(self, func, funcid, priority, timeout, args, kwargs)", "% funcid) def remove_all(self): \"\"\"Stop all running routines.\"\"\" for funcid in self.__routines.keys(): self.remove(funcid)", "until it should stop being called. Optional Keyword Arguments: priority -- priority to", "use timeout_add (with given timeout) instead of idle_add (in milliseconds) Only one function", "is given, the function itself is used. The funcid must be usable as", "GLib.idle_add( self.source_func, priority=self.priority) def pause(self): \"\"\"Pause, if already paused, do nothing\"\"\" if self.paused:", "self.__routines: remove(funcid) priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\", None) print_d(\"Added copool function", "func(*args, **kwargs): yield True pool.remove(funcid) yield False self.source_func = wrap(func, funcid, args, kwargs).next", "hash key. 
\"\"\" funcid = kwargs.pop(\"funcid\", func) if funcid in self.__routines: remove(funcid) priority", "kwargs.pop(\"timeout\", None) print_d(\"Added copool function %r with id %r\" % (func, funcid)) routine", "it under the terms of the GNU General Public License version 2 as", "routine = self._get(funcid) return routine.step() # global instance _copool = CoPool() add =", "kwargs).next @property def paused(self): \"\"\"If the routine is currently running\"\"\" return self._source_id is", "returns a Python iterator (e.g. generator) that provides values until it should stop", "if funcid in self.__routines: return self.__routines[funcid] raise ValueError(\"no pooled routine %r\" % funcid)", "def step(self): \"\"\"Raises StopIteration if the routine has nothing more to do\"\"\" return", "iterate once.\"\"\" routine = self._get(funcid) return routine.step() # global instance _copool = CoPool()", "__init__(self): self.__routines = {} def add(self, func, *args, **kwargs): \"\"\"Register a routine to", "by the Free Software Foundation \"\"\"Manage a pool of routines using Python iterators.\"\"\"", "self._source_id = GLib.idle_add( self.source_func, priority=self.priority) def pause(self): \"\"\"Pause, if already paused, do nothing\"\"\"", "self.__routines: return self.__routines[funcid] raise ValueError(\"no pooled routine %r\" % funcid) def remove(self, funcid):", "do\"\"\" return self.source_func() def resume(self): \"\"\"Resume, if already running do nothing\"\"\" if not", "Starting a new function with the same ID will stop the old one.", "must be usable as a hash key. 
\"\"\" funcid = kwargs.pop(\"funcid\", func) if", "priority -- priority to run at (default GLib.PRIORITY_LOW) funcid -- mutex/removal identifier for", "routine.\"\"\" routine = self._get(funcid) routine.resume() print_d(\"Resumed copool function id %r\" % funcid) def", "paused, do nothing\"\"\" if self.paused: return GLib.source_remove(self._source_id) self._source_id = None class CoPool(object): def", "priority=self.priority) else: self._source_id = GLib.idle_add( self.source_func, priority=self.priority) def pause(self): \"\"\"Pause, if already paused,", "function that returns a Python iterator (e.g. generator) that provides values until it", "def __init__(self): self.__routines = {} def add(self, func, *args, **kwargs): \"\"\"Register a routine", "remove(self, funcid): \"\"\"Stop a registered routine.\"\"\" routine = self._get(funcid) routine.pause() del self.__routines[funcid] print_d(\"Removed", "routines.\"\"\" for funcid in self.__routines.keys(): self.remove(funcid) def pause(self, funcid): \"\"\"Temporarily pause a registered", "_copool = CoPool() add = _copool.add pause = _copool.pause pause_all = _copool.pause_all remove", "funcid = kwargs.pop(\"funcid\", func) if funcid in self.__routines: remove(funcid) priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW)", "Free Software Foundation \"\"\"Manage a pool of routines using Python iterators.\"\"\" from gi.repository", "milliseconds) Only one function with the same funcid can be running at once.", "for funcid in self.__routines.keys(): self.remove(funcid) def pause(self, funcid): \"\"\"Temporarily pause a registered routine.\"\"\"", "function id %r\" % funcid) def pause_all(self): \"\"\"Temporarily pause all registered routines.\"\"\" for", "timeout = kwargs.pop(\"timeout\", None) print_d(\"Added copool function %r with id %r\" % (func,", "<NAME>, <NAME> # 2014 <NAME> # # This program is free software; you", "wrap(func, funcid, args, kwargs).next @property def paused(self): \"\"\"If the routine is 
currently running\"\"\"", "you can redistribute it and/or modify # it under the terms of the", "\"\"\"Pause, if already paused, do nothing\"\"\" if self.paused: return GLib.source_remove(self._source_id) self._source_id = None", "Software Foundation \"\"\"Manage a pool of routines using Python iterators.\"\"\" from gi.repository import", "if funcid in self.__routines: remove(funcid) priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\", None)", "kwargs): for value in func(*args, **kwargs): yield True pool.remove(funcid) yield False self.source_func =", "if the routine has nothing more to do\"\"\" return self.source_func() def resume(self): \"\"\"Resume,", "that returns a Python iterator (e.g. generator) that provides values until it should", "False self.source_func = wrap(func, funcid, args, kwargs).next @property def paused(self): \"\"\"If the routine", "True pool.remove(funcid) yield False self.source_func = wrap(func, funcid, args, kwargs).next @property def paused(self):", "\"\"\"Manage a pool of routines using Python iterators.\"\"\" from gi.repository import GLib class", "is used. The funcid must be usable as a hash key. \"\"\" funcid", "_Routine(self, func, funcid, priority, timeout, args, kwargs) self.__routines[funcid] = routine routine.resume() def _get(self,", "self.timeout, self.source_func, priority=self.priority) else: self._source_id = GLib.idle_add( self.source_func, priority=self.priority) def pause(self): \"\"\"Pause, if", "can be running at once. 
Starting a new function with the same ID", "remove(funcid) priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\", None) print_d(\"Added copool function %r", "currently running\"\"\" return self._source_id is None def step(self): \"\"\"Raises StopIteration if the routine", "= self._get(funcid) routine.resume() print_d(\"Resumed copool function id %r\" % funcid) def step(self, funcid):", "self._source_id = GLib.timeout_add( self.timeout, self.source_func, priority=self.priority) else: self._source_id = GLib.idle_add( self.source_func, priority=self.priority) def", "routine = self._get(funcid) routine.pause() del self.__routines[funcid] print_d(\"Removed copool function id %r\" % funcid)", "Optional Keyword Arguments: priority -- priority to run at (default GLib.PRIORITY_LOW) funcid --", "self.timeout = timeout self._source_id = None def wrap(func, funcid, args, kwargs): for value", "global instance _copool = CoPool() add = _copool.add pause = _copool.pause pause_all =", "pause = _copool.pause pause_all = _copool.pause_all remove = _copool.remove remove_all = _copool.remove_all resume", "priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\", None) print_d(\"Added copool function %r with", "free software; you can redistribute it and/or modify # it under the terms", "args, kwargs) self.__routines[funcid] = routine routine.resume() def _get(self, funcid): if funcid in self.__routines:", "terms of the GNU General Public License version 2 as # published by", "iterators.\"\"\" from gi.repository import GLib class _Routine(object): def __init__(self, pool, func, funcid, priority,", "routine is currently running\"\"\" return self._source_id is None def step(self): \"\"\"Raises StopIteration if", "# Copyright 2006 <NAME>, <NAME> # 2014 <NAME> # # This program is", "more to do\"\"\" return self.source_func() def resume(self): \"\"\"Resume, if already running do nothing\"\"\"", "it should stop being called. 
Optional Keyword Arguments: priority -- priority to run", "(default GLib.PRIORITY_LOW) funcid -- mutex/removal identifier for this function timeout -- use timeout_add", "yield False self.source_func = wrap(func, funcid, args, kwargs).next @property def paused(self): \"\"\"If the", "timeout) instead of idle_add (in milliseconds) Only one function with the same funcid", "= self._get(funcid) routine.pause() del self.__routines[funcid] print_d(\"Removed copool function id %r\" % funcid) def", "funcid, priority, timeout, args, kwargs) self.__routines[funcid] = routine routine.resume() def _get(self, funcid): if", "= None def wrap(func, funcid, args, kwargs): for value in func(*args, **kwargs): yield", "%r with id %r\" % (func, funcid)) routine = _Routine(self, func, funcid, priority,", "args, kwargs): for value in func(*args, **kwargs): yield True pool.remove(funcid) yield False self.source_func", "id %r\" % funcid) def pause_all(self): \"\"\"Temporarily pause all registered routines.\"\"\" for funcid", "<NAME> # # This program is free software; you can redistribute it and/or", "of idle_add (in milliseconds) Only one function with the same funcid can be", "self.__routines.keys(): self.pause(funcid) def resume(self, funcid): \"\"\"Resume a paused routine.\"\"\" routine = self._get(funcid) routine.resume()", "function itself is used. 
The funcid must be usable as a hash key.", "funcid): \"\"\"Temporarily pause a registered routine.\"\"\" routine = self._get(funcid) routine.pause() print_d(\"Paused copool function", "in self.__routines: remove(funcid) priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\", None) print_d(\"Added copool", "return self._source_id is None def step(self): \"\"\"Raises StopIteration if the routine has nothing", "GNU General Public License version 2 as # published by the Free Software", "pause_all = _copool.pause_all remove = _copool.remove remove_all = _copool.remove_all resume = _copool.resume step", "pooled routine %r\" % funcid) def remove(self, funcid): \"\"\"Stop a registered routine.\"\"\" routine", "= _Routine(self, func, funcid, priority, timeout, args, kwargs) self.__routines[funcid] = routine routine.resume() def", "the routine is currently running\"\"\" return self._source_id is None def step(self): \"\"\"Raises StopIteration", "import GLib class _Routine(object): def __init__(self, pool, func, funcid, priority, timeout, args, kwargs):", "return self.__routines[funcid] raise ValueError(\"no pooled routine %r\" % funcid) def remove(self, funcid): \"\"\"Stop", "= GLib.idle_add( self.source_func, priority=self.priority) def pause(self): \"\"\"Pause, if already paused, do nothing\"\"\" if", "# it under the terms of the GNU General Public License version 2", "_copool.add pause = _copool.pause pause_all = _copool.pause_all remove = _copool.remove remove_all = _copool.remove_all", "routine %r\" % funcid) def remove(self, funcid): \"\"\"Stop a registered routine.\"\"\" routine =", "self._get(funcid) routine.pause() del self.__routines[funcid] print_d(\"Removed copool function id %r\" % funcid) def remove_all(self):", "(in milliseconds) Only one function with the same funcid can be running at", "self.paused: return if self.timeout: self._source_id = GLib.timeout_add( self.timeout, self.source_func, priority=self.priority) else: 
self._source_id =", "in self.__routines.keys(): self.pause(funcid) def resume(self, funcid): \"\"\"Resume a paused routine.\"\"\" routine = self._get(funcid)", "GLib main loop. func should be a function that returns a Python iterator", "ValueError(\"no pooled routine %r\" % funcid) def remove(self, funcid): \"\"\"Stop a registered routine.\"\"\"", "new function with the same ID will stop the old one. If no", "one. If no funcid is given, the function itself is used. The funcid", "generator) that provides values until it should stop being called. Optional Keyword Arguments:", "self.paused: return GLib.source_remove(self._source_id) self._source_id = None class CoPool(object): def __init__(self): self.__routines = {}", "to run in GLib main loop. func should be a function that returns", "-- use timeout_add (with given timeout) instead of idle_add (in milliseconds) Only one", "remove_all(self): \"\"\"Stop all running routines.\"\"\" for funcid in self.__routines.keys(): self.remove(funcid) def pause(self, funcid):", "be running at once. 
Starting a new function with the same ID will", "priority self.timeout = timeout self._source_id = None def wrap(func, funcid, args, kwargs): for", "@property def paused(self): \"\"\"If the routine is currently running\"\"\" return self._source_id is None", "print_d(\"Added copool function %r with id %r\" % (func, funcid)) routine = _Routine(self,", "and/or modify # it under the terms of the GNU General Public License", "the Free Software Foundation \"\"\"Manage a pool of routines using Python iterators.\"\"\" from", "routine.resume() print_d(\"Resumed copool function id %r\" % funcid) def step(self, funcid): \"\"\"Force this", "= kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\", None) print_d(\"Added copool function %r with id", "routine = self._get(funcid) routine.resume() print_d(\"Resumed copool function id %r\" % funcid) def step(self,", "a paused routine.\"\"\" routine = self._get(funcid) routine.resume() print_d(\"Resumed copool function id %r\" %", "routine has nothing more to do\"\"\" return self.source_func() def resume(self): \"\"\"Resume, if already", "do nothing\"\"\" if self.paused: return GLib.source_remove(self._source_id) self._source_id = None class CoPool(object): def __init__(self):", "paused routine.\"\"\" routine = self._get(funcid) routine.resume() print_d(\"Resumed copool function id %r\" % funcid)", "CoPool() add = _copool.add pause = _copool.pause pause_all = _copool.pause_all remove = _copool.remove", "routine.\"\"\" routine = self._get(funcid) routine.pause() del self.__routines[funcid] print_d(\"Removed copool function id %r\" %", "GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\", None) print_d(\"Added copool function %r with id %r\" %", "key. 
\"\"\" funcid = kwargs.pop(\"funcid\", func) if funcid in self.__routines: remove(funcid) priority =", "is currently running\"\"\" return self._source_id is None def step(self): \"\"\"Raises StopIteration if the", "self.priority = priority self.timeout = timeout self._source_id = None def wrap(func, funcid, args,", "paused(self): \"\"\"If the routine is currently running\"\"\" return self._source_id is None def step(self):", "used. The funcid must be usable as a hash key. \"\"\" funcid =", "= None class CoPool(object): def __init__(self): self.__routines = {} def add(self, func, *args,", "being called. Optional Keyword Arguments: priority -- priority to run at (default GLib.PRIORITY_LOW)", "return self.source_func() def resume(self): \"\"\"Resume, if already running do nothing\"\"\" if not self.paused:", "mutex/removal identifier for this function timeout -- use timeout_add (with given timeout) instead", "= GLib.timeout_add( self.timeout, self.source_func, priority=self.priority) else: self._source_id = GLib.idle_add( self.source_func, priority=self.priority) def pause(self):", "self._source_id is None def step(self): \"\"\"Raises StopIteration if the routine has nothing more", "can redistribute it and/or modify # it under the terms of the GNU", "None def wrap(func, funcid, args, kwargs): for value in func(*args, **kwargs): yield True", "funcid must be usable as a hash key. 
\"\"\" funcid = kwargs.pop(\"funcid\", func)", "\"\"\"Raises StopIteration if the routine has nothing more to do\"\"\" return self.source_func() def", "None class CoPool(object): def __init__(self): self.__routines = {} def add(self, func, *args, **kwargs):", "args, kwargs).next @property def paused(self): \"\"\"If the routine is currently running\"\"\" return self._source_id", "%r\" % funcid) def remove(self, funcid): \"\"\"Stop a registered routine.\"\"\" routine = self._get(funcid)", "License version 2 as # published by the Free Software Foundation \"\"\"Manage a", "self.__routines.keys(): self.remove(funcid) def pause(self, funcid): \"\"\"Temporarily pause a registered routine.\"\"\" routine = self._get(funcid)", "function timeout -- use timeout_add (with given timeout) instead of idle_add (in milliseconds)", "routine.step() # global instance _copool = CoPool() add = _copool.add pause = _copool.pause", "if self.paused: return GLib.source_remove(self._source_id) self._source_id = None class CoPool(object): def __init__(self): self.__routines =", "class _Routine(object): def __init__(self, pool, func, funcid, priority, timeout, args, kwargs): self.priority =", "for this function timeout -- use timeout_add (with given timeout) instead of idle_add", "step(self, funcid): \"\"\"Force this function to iterate once.\"\"\" routine = self._get(funcid) return routine.step()", "\"\"\"Register a routine to run in GLib main loop. func should be a", "routine to run in GLib main loop. func should be a function that", "loop. func should be a function that returns a Python iterator (e.g. generator)", "has nothing more to do\"\"\" return self.source_func() def resume(self): \"\"\"Resume, if already running", "iterator (e.g. generator) that provides values until it should stop being called. Optional", "kwargs.pop(\"funcid\", func) if funcid in self.__routines: remove(funcid) priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout =", "itself is used. 
The funcid must be usable as a hash key. \"\"\"", "priority, timeout, args, kwargs): self.priority = priority self.timeout = timeout self._source_id = None", "resume(self): \"\"\"Resume, if already running do nothing\"\"\" if not self.paused: return if self.timeout:", "all registered routines.\"\"\" for funcid in self.__routines.keys(): self.pause(funcid) def resume(self, funcid): \"\"\"Resume a", "id %r\" % funcid) def step(self, funcid): \"\"\"Force this function to iterate once.\"\"\"", "if not self.paused: return if self.timeout: self._source_id = GLib.timeout_add( self.timeout, self.source_func, priority=self.priority) else:", "nothing\"\"\" if self.paused: return GLib.source_remove(self._source_id) self._source_id = None class CoPool(object): def __init__(self): self.__routines", "def __init__(self, pool, func, funcid, priority, timeout, args, kwargs): self.priority = priority self.timeout", "kwargs) self.__routines[funcid] = routine routine.resume() def _get(self, funcid): if funcid in self.__routines: return", "funcid in self.__routines.keys(): self.remove(funcid) def pause(self, funcid): \"\"\"Temporarily pause a registered routine.\"\"\" routine", "funcid can be running at once. 
Starting a new function with the same", "redistribute it and/or modify # it under the terms of the GNU General", "= timeout self._source_id = None def wrap(func, funcid, args, kwargs): for value in", "wrap(func, funcid, args, kwargs): for value in func(*args, **kwargs): yield True pool.remove(funcid) yield", "is free software; you can redistribute it and/or modify # it under the", "as # published by the Free Software Foundation \"\"\"Manage a pool of routines", "given timeout) instead of idle_add (in milliseconds) Only one function with the same", "to iterate once.\"\"\" routine = self._get(funcid) return routine.step() # global instance _copool =", "running\"\"\" return self._source_id is None def step(self): \"\"\"Raises StopIteration if the routine has", "copool function %r with id %r\" % (func, funcid)) routine = _Routine(self, func,", "The funcid must be usable as a hash key. \"\"\" funcid = kwargs.pop(\"funcid\",", "\"\"\"Force this function to iterate once.\"\"\" routine = self._get(funcid) return routine.step() # global", "funcid) def remove(self, funcid): \"\"\"Stop a registered routine.\"\"\" routine = self._get(funcid) routine.pause() del", "yield True pool.remove(funcid) yield False self.source_func = wrap(func, funcid, args, kwargs).next @property def", "\"\"\"Resume, if already running do nothing\"\"\" if not self.paused: return if self.timeout: self._source_id", "% funcid) def remove(self, funcid): \"\"\"Stop a registered routine.\"\"\" routine = self._get(funcid) routine.pause()", "idle_add (in milliseconds) Only one function with the same funcid can be running", "self._get(funcid) routine.pause() print_d(\"Paused copool function id %r\" % funcid) def pause_all(self): \"\"\"Temporarily pause", "run at (default GLib.PRIORITY_LOW) funcid -- mutex/removal identifier for this function timeout --", "routine.pause() print_d(\"Paused copool function id %r\" % funcid) def pause_all(self): \"\"\"Temporarily pause all", "identifier for this function timeout 
-- use timeout_add (with given timeout) instead of", "def remove_all(self): \"\"\"Stop all running routines.\"\"\" for funcid in self.__routines.keys(): self.remove(funcid) def pause(self,", "_copool.pause pause_all = _copool.pause_all remove = _copool.remove remove_all = _copool.remove_all resume = _copool.resume", "priority, timeout, args, kwargs) self.__routines[funcid] = routine routine.resume() def _get(self, funcid): if funcid", "func, funcid, priority, timeout, args, kwargs): self.priority = priority self.timeout = timeout self._source_id", "\"\"\"Stop a registered routine.\"\"\" routine = self._get(funcid) routine.pause() del self.__routines[funcid] print_d(\"Removed copool function", "id %r\" % (func, funcid)) routine = _Routine(self, func, funcid, priority, timeout, args,", "Only one function with the same funcid can be running at once. Starting", "the function itself is used. The funcid must be usable as a hash", "{} def add(self, func, *args, **kwargs): \"\"\"Register a routine to run in GLib", "2006 <NAME>, <NAME> # 2014 <NAME> # # This program is free software;", "func, *args, **kwargs): \"\"\"Register a routine to run in GLib main loop. func", "funcid): \"\"\"Stop a registered routine.\"\"\" routine = self._get(funcid) routine.pause() del self.__routines[funcid] print_d(\"Removed copool", "\"\"\" funcid = kwargs.pop(\"funcid\", func) if funcid in self.__routines: remove(funcid) priority = kwargs.pop(\"priority\",", "self._source_id = None def wrap(func, funcid, args, kwargs): for value in func(*args, **kwargs):", "step(self): \"\"\"Raises StopIteration if the routine has nothing more to do\"\"\" return self.source_func()", "if self.timeout: self._source_id = GLib.timeout_add( self.timeout, self.source_func, priority=self.priority) else: self._source_id = GLib.idle_add( self.source_func,", "funcid)) routine = _Routine(self, func, funcid, priority, timeout, args, kwargs) self.__routines[funcid] = routine", "a routine to run in GLib main loop. 
func should be a function", "funcid) def step(self, funcid): \"\"\"Force this function to iterate once.\"\"\" routine = self._get(funcid)", "a Python iterator (e.g. generator) that provides values until it should stop being", "self._get(funcid) return routine.step() # global instance _copool = CoPool() add = _copool.add pause", "# 2014 <NAME> # # This program is free software; you can redistribute", "= kwargs.pop(\"timeout\", None) print_d(\"Added copool function %r with id %r\" % (func, funcid))", "Python iterator (e.g. generator) that provides values until it should stop being called.", "funcid is given, the function itself is used. The funcid must be usable", "nothing\"\"\" if not self.paused: return if self.timeout: self._source_id = GLib.timeout_add( self.timeout, self.source_func, priority=self.priority)", "in func(*args, **kwargs): yield True pool.remove(funcid) yield False self.source_func = wrap(func, funcid, args,", "self.source_func, priority=self.priority) else: self._source_id = GLib.idle_add( self.source_func, priority=self.priority) def pause(self): \"\"\"Pause, if already", "gi.repository import GLib class _Routine(object): def __init__(self, pool, func, funcid, priority, timeout, args,", "= wrap(func, funcid, args, kwargs).next @property def paused(self): \"\"\"If the routine is currently", "funcid, args, kwargs).next @property def paused(self): \"\"\"If the routine is currently running\"\"\" return", "main loop. 
func should be a function that returns a Python iterator (e.g.", "Arguments: priority -- priority to run at (default GLib.PRIORITY_LOW) funcid -- mutex/removal identifier", "-- priority to run at (default GLib.PRIORITY_LOW) funcid -- mutex/removal identifier for this", "return if self.timeout: self._source_id = GLib.timeout_add( self.timeout, self.source_func, priority=self.priority) else: self._source_id = GLib.idle_add(", "routine.\"\"\" routine = self._get(funcid) routine.pause() print_d(\"Paused copool function id %r\" % funcid) def", "should stop being called. Optional Keyword Arguments: priority -- priority to run at", "% (func, funcid)) routine = _Routine(self, func, funcid, priority, timeout, args, kwargs) self.__routines[funcid]", "funcid in self.__routines: remove(funcid) priority = kwargs.pop(\"priority\", GLib.PRIORITY_LOW) timeout = kwargs.pop(\"timeout\", None) print_d(\"Added", "priority to run at (default GLib.PRIORITY_LOW) funcid -- mutex/removal identifier for this function", "ID will stop the old one. If no funcid is given, the function", "with the same funcid can be running at once. Starting a new function", "def paused(self): \"\"\"If the routine is currently running\"\"\" return self._source_id is None def", "the same ID will stop the old one. 
If no funcid is given,", "\"\"\"If the routine is currently running\"\"\" return self._source_id is None def step(self): \"\"\"Raises", "self.pause(funcid) def resume(self, funcid): \"\"\"Resume a paused routine.\"\"\" routine = self._get(funcid) routine.resume() print_d(\"Resumed", "self.__routines[funcid] print_d(\"Removed copool function id %r\" % funcid) def remove_all(self): \"\"\"Stop all running", "the routine has nothing more to do\"\"\" return self.source_func() def resume(self): \"\"\"Resume, if", "funcid, priority, timeout, args, kwargs): self.priority = priority self.timeout = timeout self._source_id =", "program is free software; you can redistribute it and/or modify # it under", "__init__(self, pool, func, funcid, priority, timeout, args, kwargs): self.priority = priority self.timeout =", "This program is free software; you can redistribute it and/or modify # it", "(e.g. generator) that provides values until it should stop being called. Optional Keyword", "**kwargs): yield True pool.remove(funcid) yield False self.source_func = wrap(func, funcid, args, kwargs).next @property", "self.__routines = {} def add(self, func, *args, **kwargs): \"\"\"Register a routine to run", "_Routine(object): def __init__(self, pool, func, funcid, priority, timeout, args, kwargs): self.priority = priority", "a new function with the same ID will stop the old one. 
If", "func, funcid, priority, timeout, args, kwargs) self.__routines[funcid] = routine routine.resume() def _get(self, funcid):", "# # This program is free software; you can redistribute it and/or modify", "routine = _Routine(self, func, funcid, priority, timeout, args, kwargs) self.__routines[funcid] = routine routine.resume()", "StopIteration if the routine has nothing more to do\"\"\" return self.source_func() def resume(self):", "to do\"\"\" return self.source_func() def resume(self): \"\"\"Resume, if already running do nothing\"\"\" if", "timeout_add (with given timeout) instead of idle_add (in milliseconds) Only one function with", "_copool.pause_all remove = _copool.remove remove_all = _copool.remove_all resume = _copool.resume step = _copool.step", "GLib.PRIORITY_LOW) funcid -- mutex/removal identifier for this function timeout -- use timeout_add (with", "function with the same ID will stop the old one. If no funcid", "add(self, func, *args, **kwargs): \"\"\"Register a routine to run in GLib main loop.", "from gi.repository import GLib class _Routine(object): def __init__(self, pool, func, funcid, priority, timeout,", "for funcid in self.__routines.keys(): self.pause(funcid) def resume(self, funcid): \"\"\"Resume a paused routine.\"\"\" routine" ]
[]
[ "from __future__ import unicode_literals from django.test import TestCase from django.contrib.auth.models import User from", "setUp(self): self.user = User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self): data = { 'username': 'testuser',", "def test_login_null_password(self): data = { 'username': 'testuser' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['password']),", "serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self): data = { 'username': 'testuser' } serializer", "{ 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self): data", "def setUp(self): self.user = User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self): data = { 'username':", "'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data = { 'password':", "def test_login_success(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data)", "test_login_null_username(self): data = { 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1)", "serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data = { 'password': '<PASSWORD>' } serializer", "self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data = { 'password': '<PASSWORD>' } serializer = 
serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid())", "= User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self): data = { 'username': 'testuser', 'password': '<PASSWORD>'", "} serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data = { 'password': '<PASSWORD>' }", "data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def", "= serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self): data = { 'username': 'testuser' }", "serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data = { 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data)", "= serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data = { 'password': '<PASSWORD>' } serializer =", "serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self): data = { 'username': 'testuser'", "def test_login_wrong_password(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data)", "serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self): data = { 'username': 'testuser', 'password': '<PASSWORD>'", "import User from api import serializers class LoginSerializerTest(TestCase): \"\"\" Tests all parameters of", "'username': 'NOTtestuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) 
def test_login_wrong_password(self): data =", "from django.contrib.auth.models import User from api import serializers class LoginSerializerTest(TestCase): \"\"\" Tests all", "serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data = { 'username': 'NOTtestuser', 'password': '<PASSWORD>' } serializer", "coding: utf-8 -*- from __future__ import unicode_literals from django.test import TestCase from django.contrib.auth.models", "= serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data = { 'username': 'NOTtestuser', 'password': '<PASSWORD>' }", "unicode_literals from django.test import TestCase from django.contrib.auth.models import User from api import serializers", "'<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data = { 'username': 'NOTtestuser',", "import serializers class LoginSerializerTest(TestCase): \"\"\" Tests all parameters of LoginSerializer \"\"\" def setUp(self):", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django.test import TestCase from", "= { 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self):", "User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' }", "} serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self): data = { 'username':", "'<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self): data = { 'username': 
'testuser',", "= { 'username': 'NOTtestuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self):", "data = { 'username': 'NOTtestuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def", "utf-8 -*- from __future__ import unicode_literals from django.test import TestCase from django.contrib.auth.models import", "of LoginSerializer \"\"\" def setUp(self): self.user = User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self): data", "Tests all parameters of LoginSerializer \"\"\" def setUp(self): self.user = User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\")", "LoginSerializer \"\"\" def setUp(self): self.user = User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self): data =", "django.test import TestCase from django.contrib.auth.models import User from api import serializers class LoginSerializerTest(TestCase):", "self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self): data = { 'username': 'testuser' } serializer =", "serializers class LoginSerializerTest(TestCase): \"\"\" Tests all parameters of LoginSerializer \"\"\" def setUp(self): self.user", "= { 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self):", "import unicode_literals from django.test import TestCase from django.contrib.auth.models import User from api import", "self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self): data = { 'username': 'testuser' } serializer = serializers.LoginSerializer(data=data)", 
"serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer", "__future__ import unicode_literals from django.test import TestCase from django.contrib.auth.models import User from api", "def test_login_null_username(self): data = { 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']),", "test_login_wrong_username(self): data = { 'username': 'NOTtestuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid())", "self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data = { 'username': 'NOTtestuser', 'password': '<PASSWORD>' } serializer =", "'<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data = { 'password': '<PASSWORD>'", "'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data = {", "'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data =", "} serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self): data = { 'username': 'testuser', 'password':", "test_login_wrong_password(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid())", "data = { 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def", "1) def 
test_login_null_password(self): data = { 'username': 'testuser' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid())", "LoginSerializerTest(TestCase): \"\"\" Tests all parameters of LoginSerializer \"\"\" def setUp(self): self.user = User.objects.create_user(username=\"testuser\",", "self.user = User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self): data = { 'username': 'testuser', 'password':", "'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data = { 'username':", "'<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self): data = {", "parameters of LoginSerializer \"\"\" def setUp(self): self.user = User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self):", "= serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' }", "{ 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data", "\"\"\" def setUp(self): self.user = User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self): data = {", "TestCase from django.contrib.auth.models import User from api import serializers class LoginSerializerTest(TestCase): \"\"\" Tests", "'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data =", "'password': '<PASSWORD>' } serializer = 
serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self): data = { 'username':", "{ 'username': 'NOTtestuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self): data", "} serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data = { 'username': 'NOTtestuser', 'password':", "'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['username']), 1) def test_login_null_password(self): data =", "data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def", "\"\"\" Tests all parameters of LoginSerializer \"\"\" def setUp(self): self.user = User.objects.create_user(username=\"testuser\", email=\"<EMAIL>\",", "test_login_null_password(self): data = { 'username': 'testuser' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) self.assertEqual(len(serializer.errors['password']), 1)", "api import serializers class LoginSerializerTest(TestCase): \"\"\" Tests all parameters of LoginSerializer \"\"\" def", "'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_null_username(self): data = {", "<filename>fyp/api/tests/serializers/test_login_serializer.py<gh_stars>0 # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.test import", "email=\"<EMAIL>\", password=\"<PASSWORD>\") def test_login_success(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer", "all parameters of LoginSerializer \"\"\" def setUp(self): self.user = User.objects.create_user(username=\"testuser\", 
email=\"<EMAIL>\", password=\"<PASSWORD>\") def", "'NOTtestuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self): data = {", "{ 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data", "serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self): data = { 'username': 'NOTtestuser', 'password': '<PASSWORD>'", "-*- from __future__ import unicode_literals from django.test import TestCase from django.contrib.auth.models import User", "class LoginSerializerTest(TestCase): \"\"\" Tests all parameters of LoginSerializer \"\"\" def setUp(self): self.user =", "def test_login_wrong_username(self): data = { 'username': 'NOTtestuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data)", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.test import TestCase", "from api import serializers class LoginSerializerTest(TestCase): \"\"\" Tests all parameters of LoginSerializer \"\"\"", "test_login_success(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid())", "password=\"<PASSWORD>\") def test_login_success(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer =", "self.assertFalse(serializer.is_valid()) def test_login_wrong_password(self): data = { 'username': 'testuser', 'password': '<PASSWORD>' } serializer =", "User from api import serializers class LoginSerializerTest(TestCase): \"\"\" Tests all parameters of LoginSerializer", "django.contrib.auth.models import User from api import serializers class LoginSerializerTest(TestCase): \"\"\" Tests all parameters", "from django.test import TestCase from 
django.contrib.auth.models import User from api import serializers class", "= { 'username': 'testuser', 'password': '<PASSWORD>' } serializer = serializers.LoginSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_login_wrong_username(self):", "import TestCase from django.contrib.auth.models import User from api import serializers class LoginSerializerTest(TestCase): \"\"\"" ]
[ "= save_repowc() def teardown_method(self, meth): x = getattr(self, '_savedrepowc', None) if x is", "CommonFSTests, setuptestfs from py.__.path.svn import cache, svncommon mypath = py.magic.autopath() repodump = mypath.dirpath('repotest.dump')", "res = url.info() # assert res.size > len(\"samplefile\") and res.created_rev == 1155 #", "= url.info() # assert res.size > len(\"samplefile\") and res.created_rev == 1155 # the", "url.propget('svn:eol-style') assert value == 'native' def test_proplist(self): url = self.root.join(\"samplefile\") res = url.proplist()", "#def test_not_exists_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): #", "cache previously obtained wcs! # def getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir =", "test_proplist(self): url = self.root.join(\"samplefile\") res = url.proplist() assert res['svn:eol-style'] == 'native' def test_info(self):", "# assert url.rev == 10 #def test_info_rev(self): # url = self.root.__class__(self.rooturl, rev=1155) #", "repocache = cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev = repocache.get(self.root.join('test').strpath) assert rev == 42", "except: repo.remove() raise if sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/')", "= cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url, rev = repocache.get(self.root.join('test').strpath) assert rev == -1", "# assert url.rev == None # assert url.strpath == self.root.strpath # url =", "> len(\"samplefile\") and res.created_rev == 1155 # the following tests are easier if", "py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if not repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create", "self.root def test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self): # url = self.root.__class__(self.rooturl, rev=500) #", 
"class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX we need to test more normalizing properties", "repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def restore_repowc((savedrepo, savedwc)): repo, wc = getrepowc() print", "== self.root.strpath def _test_getreporev(self): \"\"\" this test runs so slow it's usually disabled", "py.path.svnwc(wcdir) return (\"file://%s\" % repo, wc) def save_repowc(): repo, wc = getrepowc() repo", "= py.path.local(repo[len(\"file://\"):]) print repo assert repo.check() # repositories have read only files on", "test_log_simple(self): url = self.root.join(\"samplefile\") logentries = url.log() for logentry in logentries: assert logentry.rev", "cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev = repocache.get(self.root.join('test').strpath) assert rev == 42 assert url", "it's usually disabled \"\"\" old = cache.repositories.repos try: _repocache.clear() root = self.root.new(rev=-1) url,", "py.path.svnurl(\"file://%s\" % repo) class CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn = meth.func_name for x", "#assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q \"%s\"", "repo into\", wc else: print \"using repository at\", repo wc = py.path.svnwc(wcdir) return", "py import path, test, process from py.__.path.testing.fscommon import CommonFSTests, setuptestfs from py.__.path.svn import", "repo wc = py.path.svnwc(wcdir) return (\"file://%s\" % repo, wc) def save_repowc(): repo, wc", "= cache.repocache.get(root.strpath) assert rev>=0 assert url == svnrepourl finally: repositories.repos = old #cache.repositories.put(svnrepourl,", "py.__.path.testing.fscommon import CommonFSTests, setuptestfs from py.__.path.svn import cache, svncommon mypath = py.magic.autopath() repodump", "hasattr(logentry, 'author') assert hasattr(logentry, 
'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX we need", "# XXX we need to test more normalizing properties url = self.root.join(\"/\") assert", "repo) print \"checked out new repo into\", wc else: print \"using repository at\",", "and res.created_rev >= 0 def test_log_simple(self): url = self.root.join(\"samplefile\") logentries = url.log() for", "= wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def restore_repowc((savedrepo, savedwc)): repo, wc =", "url, rev = repocache.get(self.root.strpath) assert rev == -1 assert url == self.root.strpath def", "py.process.cmdexec('svnadmin load -q \"%s\" <\"%s\"' % (svncommon._escape_helper(repo), repodump)) print \"created svn repository\", repo", "print repo print repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):]) print repo assert repo.check() # repositories", "not found\") return svnbin # make a wc directory out of a given", "repo) class CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn = meth.func_name for x in 'test_remove',", "assert url.rev == None # assert url.strpath == self.root.strpath # url = self.root.new(rev=10)", "= self.root.join(\"samplefile\") value = url.propget('svn:eol-style') assert value == 'native' def test_proplist(self): url =", "None) if x is not None: restore_repowc(x) del self._savedrepowc def test_propget(self): url =", "_repocache.clear() root = self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath) assert rev>=0 assert url ==", "assert rev>=0 assert url == svnrepourl finally: repositories.repos = old #cache.repositories.put(svnrepourl, 1200, 0)", "timestamp=0) url, rev = repocache.get(self.root.join('test').strpath) assert rev == -1 assert url == self.root.strpath", "self.root.strpath def _test_getreporev(self): \"\"\" this test runs so slow it's usually disabled \"\"\"", "= repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') 
repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def restore_repowc((savedrepo, savedwc)):", "str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\" % repo) class CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn =", "import cache, svncommon mypath = py.magic.autopath() repodump = mypath.dirpath('repotest.dump') def getsvnbin(): svnbin =", "on windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an empty repository", "self._savedrepowc = save_repowc() def teardown_method(self, meth): x = getattr(self, '_savedrepowc', None) if x", "== 1 assert hasattr(logentry, 'author') assert hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): #", "== self.root.strpath def test_repocache_notimeout(self): repocache = cache.RepoCache() repocache.timeout = 0 repocache.put(self.root.strpath, self.root.rev) url,", "= py.magic.autopath() repodump = mypath.dirpath('repotest.dump') def getsvnbin(): svnbin = py.path.local.sysfind('svn') if svnbin is", "rev == -1 assert url == self.root.strpath def _test_getreporev(self): \"\"\" this test runs", "self.root.new(rev=1199) # assert newpath != self.root def test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self): #", "assert hasattr(logentry, 'author') assert hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX we", "assert url.strpath == self.root.strpath # url = self.root.new(rev=10) # assert url.rev == 10", "getrepowc() print repo print repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):]) print repo assert repo.check() #", "out of a given root url # cache previously obtained wcs! 
# def", "newpath != self.root def test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self): # url = self.root.__class__(self.rooturl,", "= py.path.svnwc(wcdir) if py.std.sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/') wc.checkout(url='file://%s'", "self.root.new(rev=None) # assert url.rev == None # assert url.strpath == self.root.strpath # url", "rev == 42 assert url == self.root.strpath def test_repocache_notimeout(self): repocache = cache.RepoCache() repocache.timeout", "wc) def save_repowc(): repo, wc = getrepowc() repo = py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo", "repository at\", repo wc = py.path.svnwc(wcdir) return (\"file://%s\" % repo, wc) def save_repowc():", "test_trailing_slash_is_stripped(self): # XXX we need to test more normalizing properties url = self.root.join(\"/\")", "repo, wc = getrepowc() print repo print repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):]) print repo", "% repo, wc) def save_repowc(): repo, wc = getrepowc() repo = py.path.local(repo[len(\"file://\"):]) assert", "assert url == self.root.strpath def _test_getreporev(self): \"\"\" this test runs so slow it's", "savedwc.localpath.move(wc.localpath) # create an empty repository for testing purposes and return the url", "py.magic.autopath() repodump = mypath.dirpath('repotest.dump') def getsvnbin(): svnbin = py.path.local.sysfind('svn') if svnbin is None:", "= 0 repocache.put(self.root.strpath, self.root.rev) url, rev = repocache.get(self.root.strpath) assert rev == -1 assert", "== 10 #def test_info_rev(self): # url = self.root.__class__(self.rooturl, rev=1155) # url = url.join(\"samplefile\")", "repocache.put(self.root.strpath, self.root.rev) url, rev = repocache.get(self.root.strpath) assert rev == -1 assert url ==", "== 42 assert url == self.root.strpath def test_repocache_notimeout(self): repocache = cache.RepoCache() repocache.timeout =", "binary not found\") return svnbin # make a wc 
directory out of a", "getsvnbin(): svnbin = py.path.local.sysfind('svn') if svnbin is None: py.test.skip(\"svn binary not found\") return", "new repo into\", wc else: print \"using repository at\", repo wc = py.path.svnwc(wcdir)", "= getrepowc() repo = py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo = repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save')", "# repositories have read only files on windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo)", "this test runs so slow it's usually disabled \"\"\" old = cache.repositories.repos try:", "assert newpath != self.root def test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self): # url =", "wc.checkout(url='file://%s' % repo) print \"checked out new repo into\", wc else: print \"using", "repo.check() savedrepo = repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def", "wc = getrepowc() print repo print repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):]) print repo assert", "<\"%s\"' % (svncommon._escape_helper(repo), repodump)) print \"created svn repository\", repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir)", "save_repowc() def teardown_method(self, meth): x = getattr(self, '_savedrepowc', None) if x is not", "make a wc directory out of a given root url # cache previously", "url = self.root.__class__(self.rooturl, rev=1155) # url = url.join(\"samplefile\") # res = url.info() #", "rev = repocache.get(self.root.join('test').strpath) assert rev == -1 assert url == self.root.strpath def _test_getreporev(self):", "root = self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath) assert rev>=0 assert url == svnrepourl", "# url = self.root.new(rev=10) # assert url.rev == 10 #def test_info_rev(self): # url", "slow it's usually disabled \"\"\" old = cache.repositories.repos try: _repocache.clear() root = 
self.root.new(rev=-1)", "return savedrepo, savedwc def restore_repowc((savedrepo, savedwc)): repo, wc = getrepowc() print repo print", "from py import path, test, process from py.__.path.testing.fscommon import CommonFSTests, setuptestfs from py.__.path.svn", "== 'win32': repo = '/' + str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\" % repo) class", "url, rev = repocache.get(self.root.join('test').strpath) assert rev == -1 assert url == self.root.strpath def", "repocache.get(self.root.join('test').strpath) assert rev == 42 assert url == self.root.strpath def test_repocache_notimeout(self): repocache =", "url = self.root.join(\"samplefile\") value = url.propget('svn:eol-style') assert value == 'native' def test_proplist(self): url", "windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an empty repository for", "# url = url.join(\"samplefile\") # res = url.info() # assert res.size > len(\"samplefile\")", "10 #def test_info_rev(self): # url = self.root.__class__(self.rooturl, rev=1155) # url = url.join(\"samplefile\") #", "if not repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin", "we need to test more normalizing properties url = self.root.join(\"/\") assert self.root ==", "url.rev == 10 #def test_info_rev(self): # url = self.root.__class__(self.rooturl, rev=1155) # url =", "CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn = meth.func_name for x in 'test_remove', 'test_move', 'test_status_deleted':", "42) url, rev = repocache.get(self.root.join('test').strpath) assert rev == 42 assert url == self.root.strpath", "teardown_method(self, meth): x = getattr(self, '_savedrepowc', None) if x is not None: restore_repowc(x)", "test_propget(self): url = self.root.join(\"samplefile\") value = url.propget('svn:eol-style') assert value == 'native' 
def test_proplist(self):", "import py from py import path, test, process from py.__.path.testing.fscommon import CommonFSTests, setuptestfs", "for x in 'test_remove', 'test_move', 'test_status_deleted': if bn.startswith(x): self._savedrepowc = save_repowc() def teardown_method(self,", "return (\"file://%s\" % repo, wc) def save_repowc(): repo, wc = getrepowc() repo =", "url = self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl,", "py.process.cmdexec('svnadmin create \"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q \"%s\" <\"%s\"' % (svncommon._escape_helper(repo), repodump))", "into\", wc else: print \"using repository at\", repo wc = py.path.svnwc(wcdir) return (\"file://%s\"", "== -1 assert url == self.root.strpath def _test_getreporev(self): \"\"\" this test runs so", "so slow it's usually disabled \"\"\" old = cache.repositories.repos try: _repocache.clear() root =", "self.root.join(\"/\") assert self.root == url #def test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199) # assert", "= cache.RepoCache() repocache.timeout = 0 repocache.put(self.root.strpath, self.root.rev) url, rev = repocache.get(self.root.strpath) assert rev", "class def test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev = repocache.get(self.root.join('test').strpath) assert", "meth): x = getattr(self, '_savedrepowc', None) if x is not None: restore_repowc(x) del", "= py.test.ensuretemp(wcname) if not repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"' %", "cache, svncommon mypath = py.magic.autopath() repodump = mypath.dirpath('repotest.dump') def getsvnbin(): svnbin = py.path.local.sysfind('svn')", "a path class def test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev =", "need to 
test more normalizing properties url = self.root.join(\"/\") assert self.root == url", "cache.repocache.get(root.strpath) assert rev>=0 assert url == svnrepourl finally: repositories.repos = old #cache.repositories.put(svnrepourl, 1200,", "0 repocache.put(self.root.strpath, self.root.rev) url, rev = repocache.get(self.root.strpath) assert rev == -1 assert url", "value == 'native' def test_proplist(self): url = self.root.join(\"samplefile\") res = url.proplist() assert res['svn:eol-style']", "to test more normalizing properties url = self.root.join(\"/\") assert self.root == url #def", "assert url.rev == 10 #def test_info_rev(self): # url = self.root.__class__(self.rooturl, rev=1155) # url", "= repocache.get(self.root.join('test').strpath) assert rev == 42 assert url == self.root.strpath def test_repocache_notimeout(self): repocache", "py.__.path.svn import cache, svncommon mypath = py.magic.autopath() repodump = mypath.dirpath('repotest.dump') def getsvnbin(): svnbin", "svnbin is None: py.test.skip(\"svn binary not found\") return svnbin # make a wc", "# def getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if not repo.listdir():", "wc = getrepowc() repo = py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo = repo.dirpath('repo_save') savedwc =", "is not None: restore_repowc(x) del self._savedrepowc def test_propget(self): url = self.root.join(\"samplefile\") value =", "try: _repocache.clear() root = self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath) assert rev>=0 assert url", "repocache = cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url, rev = repocache.get(self.root.join('test').strpath) assert rev ==", "import sys import py from py import path, test, process from py.__.path.testing.fscommon import", "== 'native' def test_proplist(self): url = self.root.join(\"samplefile\") res = url.proplist() assert res['svn:eol-style'] ==", "else: print 
\"using repository at\", repo wc = py.path.svnwc(wcdir) return (\"file://%s\" % repo,", "'test_remove', 'test_move', 'test_status_deleted': if bn.startswith(x): self._savedrepowc = save_repowc() def teardown_method(self, meth): x =", "url = url.join(\"samplefile\") # res = url.info() # assert res.size > len(\"samplefile\") and", "self.root.__class__(self.rooturl, rev=1155) # url = url.join(\"samplefile\") # res = url.info() # assert res.size", "the following tests are easier if we have a path class def test_repocache_simple(self):", "if py.std.sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' % repo)", "print repo assert repo.check() # repositories have read only files on windows #repo.chmod(0777,", "res.created_rev == 1155 # the following tests are easier if we have a", "'win32': repo = '/' + str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\" % repo) class CommonSvnTests(CommonFSTests):", "rev=500) # assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT,", "for logentry in logentries: assert logentry.rev == 1 assert hasattr(logentry, 'author') assert hasattr(logentry,", "test, process from py.__.path.testing.fscommon import CommonFSTests, setuptestfs from py.__.path.svn import cache, svncommon mypath", "cache.repositories.repos try: _repocache.clear() root = self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath) assert rev>=0 assert", "+ str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' % repo) print \"checked out new repo into\", wc", "empty repository for testing purposes and return the url to it def make_test_repo(name=\"test-repository\"):", "= self.root.join(\"samplefile\") logentries = url.log() for logentry in logentries: assert logentry.rev == 1", "<filename>py/path/svn/testing/svntestbase.py import sys import py from py import path, test, process from py.__.path.testing.fscommon", 
"str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' % repo) print \"checked out new repo into\", wc else:", "rev = repocache.get(self.root.strpath) assert rev == -1 assert url == self.root.strpath def test_repocache_outdated(self):", "def test_proplist(self): url = self.root.join(\"samplefile\") res = url.proplist() assert res['svn:eol-style'] == 'native' def", "= self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath) assert rev>=0 assert url == svnrepourl finally:", "return svnbin # make a wc directory out of a given root url", "return the url to it def make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create", "py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo = repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo,", "assert repo.check() savedrepo = repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc", ">= 0 def test_log_simple(self): url = self.root.join(\"samplefile\") logentries = url.log() for logentry in", "def test_trailing_slash_is_stripped(self): # XXX we need to test more normalizing properties url =", "a given root url # cache previously obtained wcs! 
# def getrepowc(reponame='basetestrepo', wcname='wc'):", "test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url, rev = repocache.get(self.root.join('test').strpath) assert rev", "\"\"\" old = cache.repositories.repos try: _repocache.clear() root = self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath)", "we have a path class def test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42) url,", "restore_repowc((savedrepo, savedwc)): repo, wc = getrepowc() print repo print repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):])", "0 def test_log_simple(self): url = self.root.join(\"samplefile\") logentries = url.log() for logentry in logentries:", "42 assert url == self.root.strpath def test_repocache_notimeout(self): repocache = cache.RepoCache() repocache.timeout = 0", "(\"file://%s\" % repo, wc) def save_repowc(): repo, wc = getrepowc() repo = py.path.local(repo[len(\"file://\"):])", "url = self.root.new(rev=10) # assert url.rev == 10 #def test_info_rev(self): # url =", "self.root.join(\"samplefile\") res = url.proplist() assert res['svn:eol-style'] == 'native' def test_info(self): url = self.root.join(\"samplefile\")", "assert logentry.rev == 1 assert hasattr(logentry, 'author') assert hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests): def", "cache.RepoCache() repocache.timeout = 0 repocache.put(self.root.strpath, self.root.rev) url, rev = repocache.get(self.root.strpath) assert rev ==", "# newpath = self.root.new(rev=1199) # assert newpath != self.root def test_exists_svn_root(self): assert self.root.check()", "repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def restore_repowc((savedrepo, savedwc)): repo,", "del self._savedrepowc def test_propget(self): url = self.root.join(\"samplefile\") value = 
url.propget('svn:eol-style') assert value ==", "setuptestfs from py.__.path.svn import cache, svncommon mypath = py.magic.autopath() repodump = mypath.dirpath('repotest.dump') def", "def save_repowc(): repo, wc = getrepowc() repo = py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo =", "= self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir) #def test_newrev(self): # url = self.root.new(rev=None) #", "svnbin = py.path.local.sysfind('svn') if svnbin is None: py.test.skip(\"svn binary not found\") return svnbin", "import CommonFSTests, setuptestfs from py.__.path.svn import cache, svncommon mypath = py.magic.autopath() repodump =", "py.std.sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' % repo) print", "url = self.root.new(rev=None) # assert url.rev == None # assert url.strpath == self.root.strpath", "test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir) #def test_newrev(self): # url", "properties url = self.root.join(\"/\") assert self.root == url #def test_different_revs_compare_unequal(self): # newpath =", "# assert res.size > len(\"samplefile\") and res.created_rev == 1155 # the following tests", "wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if not repo.listdir(): #assert not wcdir.check()", "#def test_info_rev(self): # url = self.root.__class__(self.rooturl, rev=1155) # url = url.join(\"samplefile\") # res", "not repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load", "repo) except: repo.remove() raise if sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\',", "repositories have read only files on windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath)", "+ 
str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\" % repo) class CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn", "rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an empty repository for testing purposes", "= self.root.new(rev=1199) # assert newpath != self.root def test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self):", "repocache.get(self.root.strpath) assert rev == -1 assert url == self.root.strpath def test_repocache_outdated(self): repocache =", "repository\", repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if py.std.sys.platform == 'win32': repo = '/'", "out new repo into\", wc else: print \"using repository at\", repo wc =", "disabled \"\"\" old = cache.repositories.repos try: _repocache.clear() root = self.root.new(rev=-1) url, rev =", "'author') assert hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX we need to", "logentries: assert logentry.rev == 1 assert hasattr(logentry, 'author') assert hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests):", "svnbin # make a wc directory out of a given root url #", "wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def restore_repowc((savedrepo, savedwc)): repo, wc = getrepowc() print repo", "test_info(self): url = self.root.join(\"samplefile\") res = url.info() assert res.size > len(\"samplefile\") and res.created_rev", "url == self.root.strpath def test_repocache_notimeout(self): repocache = cache.RepoCache() repocache.timeout = 0 repocache.put(self.root.strpath, self.root.rev)", "'native' def test_proplist(self): url = self.root.join(\"samplefile\") res = url.proplist() assert res['svn:eol-style'] == 'native'", "self.root.strpath # url = self.root.new(rev=10) # assert url.rev == 10 #def test_info_rev(self): #", "= self.root.join(\"samplefile\") res = url.info() assert 
res.size > len(\"samplefile\") and res.created_rev >= 0", "-1 assert url == self.root.strpath def test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0)", "try: py.process.cmdexec('svnadmin create %s' % repo) except: repo.remove() raise if sys.platform == 'win32':", "= url.proplist() assert res['svn:eol-style'] == 'native' def test_info(self): url = self.root.join(\"samplefile\") res =", "read only files on windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create", "make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s' % repo) except: repo.remove() raise", "directory out of a given root url # cache previously obtained wcs! #", "py from py import path, test, process from py.__.path.testing.fscommon import CommonFSTests, setuptestfs from", "wc else: print \"using repository at\", repo wc = py.path.svnwc(wcdir) return (\"file://%s\" %", "# raises(py.error.ENOENT, url.listdir) #def test_newrev(self): # url = self.root.new(rev=None) # assert url.rev ==", "# create an empty repository for testing purposes and return the url to", "XXX we need to test more normalizing properties url = self.root.join(\"/\") assert self.root", "more normalizing properties url = self.root.join(\"/\") assert self.root == url #def test_different_revs_compare_unequal(self): #", "old = cache.repositories.repos try: _repocache.clear() root = self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath) assert", "\"%s\" <\"%s\"' % (svncommon._escape_helper(repo), repodump)) print \"created svn repository\", repo wcdir.ensure(dir=1) wc =", "url.join(\"samplefile\") # res = url.info() # assert res.size > len(\"samplefile\") and res.created_rev ==", "#def test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir) #def 
test_newrev(self): #", "repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q \"%s\" <\"%s\"' % (svncommon._escape_helper(repo),", "% repo) print \"checked out new repo into\", wc else: print \"using repository", "'native' def test_info(self): url = self.root.join(\"samplefile\") res = url.info() assert res.size > len(\"samplefile\")", "url = self.root.join(\"samplefile\") logentries = url.log() for logentry in logentries: assert logentry.rev ==", "py.process.cmdexec('svnadmin create %s' % repo) except: repo.remove() raise if sys.platform == 'win32': repo", "url.info() assert res.size > len(\"samplefile\") and res.created_rev >= 0 def test_log_simple(self): url =", "value = url.propget('svn:eol-style') assert value == 'native' def test_proplist(self): url = self.root.join(\"samplefile\") res", "= repocache.get(self.root.join('test').strpath) assert rev == -1 assert url == self.root.strpath def _test_getreporev(self): \"\"\"", "def teardown_method(self, meth): x = getattr(self, '_savedrepowc', None) if x is not None:", "res = url.info() assert res.size > len(\"samplefile\") and res.created_rev >= 0 def test_log_simple(self):", "= getattr(self, '_savedrepowc', None) if x is not None: restore_repowc(x) del self._savedrepowc def", "repocache.put(self.root.strpath, 42) url, rev = repocache.get(self.root.join('test').strpath) assert rev == 42 assert url ==", "# make a wc directory out of a given root url # cache", "repocache.put(self.root.strpath, 42, timestamp=0) url, rev = repocache.get(self.root.join('test').strpath) assert rev == -1 assert url", "previously obtained wcs! 
# def getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname)", "rev=1155) # url = url.join(\"samplefile\") # res = url.info() # assert res.size >", "self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl, rev=500) #", "'/' + str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' % repo) print \"checked out new repo into\",", "url, rev = cache.repocache.get(root.strpath) assert rev>=0 assert url == svnrepourl finally: repositories.repos =", "wc = py.path.svnwc(wcdir) if py.std.sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/')", "wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def restore_repowc((savedrepo, savedwc)): repo, wc = getrepowc()", "None: py.test.skip(\"svn binary not found\") return svnbin # make a wc directory out", "py.path.svnwc(wcdir) if py.std.sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' %", "logentry.rev == 1 assert hasattr(logentry, 'author') assert hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self):", "for testing purposes and return the url to it def make_test_repo(name=\"test-repository\"): repo =", "'/' + str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\" % repo) class CommonSvnTests(CommonFSTests): def setup_method(self, meth):", "== None # assert url.strpath == self.root.strpath # url = self.root.new(rev=10) # assert", "obtained wcs! 
# def getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if", "wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an empty repository for testing purposes and return", "def test_log_simple(self): url = self.root.join(\"samplefile\") logentries = url.log() for logentry in logentries: assert", "== self.root.strpath # url = self.root.new(rev=10) # assert url.rev == 10 #def test_info_rev(self):", "'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX we need to test more normalizing", "setup_method(self, meth): bn = meth.func_name for x in 'test_remove', 'test_move', 'test_status_deleted': if bn.startswith(x):", "assert rev == 42 assert url == self.root.strpath def test_repocache_notimeout(self): repocache = cache.RepoCache()", "svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q \"%s\" <\"%s\"' % (svncommon._escape_helper(repo), repodump)) print \"created svn repository\",", "def setup_method(self, meth): bn = meth.func_name for x in 'test_remove', 'test_move', 'test_status_deleted': if", "purposes and return the url to it def make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name) try:", "= mypath.dirpath('repotest.dump') def getsvnbin(): svnbin = py.path.local.sysfind('svn') if svnbin is None: py.test.skip(\"svn binary", "wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if py.std.sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\',", "of a given root url # cache previously obtained wcs! 
# def getrepowc(reponame='basetestrepo',", "getrepowc() repo = py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo = repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') repo.copy(savedrepo)", "= meth.func_name for x in 'test_remove', 'test_move', 'test_status_deleted': if bn.startswith(x): self._savedrepowc = save_repowc()", "wc directory out of a given root url # cache previously obtained wcs!", "mypath.dirpath('repotest.dump') def getsvnbin(): svnbin = py.path.local.sysfind('svn') if svnbin is None: py.test.skip(\"svn binary not", "#def test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199) # assert newpath != self.root def test_exists_svn_root(self):", "url == self.root.strpath def test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url, rev", "sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\" % repo)", "url = self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir) #def test_newrev(self): # url = self.root.new(rev=None)", "at\", repo wc = py.path.svnwc(wcdir) return (\"file://%s\" % repo, wc) def save_repowc(): repo,", "are easier if we have a path class def test_repocache_simple(self): repocache = cache.RepoCache()", "\"checked out new repo into\", wc else: print \"using repository at\", repo wc", "#repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an empty repository for testing", "= url.propget('svn:eol-style') assert value == 'native' def test_proplist(self): url = self.root.join(\"samplefile\") res =", "from py.__.path.svn import cache, svncommon mypath = py.magic.autopath() repodump = mypath.dirpath('repotest.dump') def getsvnbin():", "repodump = mypath.dirpath('repotest.dump') def getsvnbin(): svnbin = py.path.local.sysfind('svn') if svnbin is None: py.test.skip(\"svn", "not wcdir.check() 
repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q \"%s\" <\"%s\"'", "self.root.new(rev=10) # assert url.rev == 10 #def test_info_rev(self): # url = self.root.__class__(self.rooturl, rev=1155)", "repo = py.path.local(repo[len(\"file://\"):]) print repo assert repo.check() # repositories have read only files", "== -1 assert url == self.root.strpath def test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42,", "= py.path.svnwc(wcdir) return (\"file://%s\" % repo, wc) def save_repowc(): repo, wc = getrepowc()", "repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an empty repository for testing purposes and", "self._savedrepowc def test_propget(self): url = self.root.join(\"samplefile\") value = url.propget('svn:eol-style') assert value == 'native'", "rev == -1 assert url == self.root.strpath def test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath,", "== self.root.strpath def test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url, rev =", "self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir) #def test_newrev(self): # url = self.root.new(rev=None) # assert", "= py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s' % repo) except: repo.remove() raise if sys.platform", "url.log() for logentry in logentries: assert logentry.rev == 1 assert hasattr(logentry, 'author') assert", "raise if sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\"", "\"using repository at\", repo wc = py.path.svnwc(wcdir) return (\"file://%s\" % repo, wc) def", "== 'win32': repo = '/' + str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' % repo) print \"checked", "repocache = cache.RepoCache() repocache.timeout = 0 
repocache.put(self.root.strpath, self.root.rev) url, rev = repocache.get(self.root.strpath) assert", "rev = cache.repocache.get(root.strpath) assert rev>=0 assert url == svnrepourl finally: repositories.repos = old", "test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0)", "from py.__.path.testing.fscommon import CommonFSTests, setuptestfs from py.__.path.svn import cache, svncommon mypath = py.magic.autopath()", "1 assert hasattr(logentry, 'author') assert hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX", "-q \"%s\" <\"%s\"' % (svncommon._escape_helper(repo), repodump)) print \"created svn repository\", repo wcdir.ensure(dir=1) wc", "repo.check() # repositories have read only files on windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove()", "assert res.size > len(\"samplefile\") and res.created_rev >= 0 def test_log_simple(self): url = self.root.join(\"samplefile\")", "an empty repository for testing purposes and return the url to it def", "only files on windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an", "= url.join(\"samplefile\") # res = url.info() # assert res.size > len(\"samplefile\") and res.created_rev", "repodump)) print \"created svn repository\", repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if py.std.sys.platform ==", "= py.path.local.sysfind('svn') if svnbin is None: py.test.skip(\"svn binary not found\") return svnbin #", "svn repository\", repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if py.std.sys.platform == 'win32': repo =", "'/') return py.path.svnurl(\"file://%s\" % repo) class CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn = meth.func_name", "self.root.rev) url, rev = repocache.get(self.root.strpath) assert rev == -1 assert 
url == self.root.strpath", "def test_info(self): url = self.root.join(\"samplefile\") res = url.info() assert res.size > len(\"samplefile\") and", "print repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):]) print repo assert repo.check() # repositories have read", "it def make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s' % repo) except:", "if we have a path class def test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42)", "savedrepo, savedwc def restore_repowc((savedrepo, savedwc)): repo, wc = getrepowc() print repo print repo[len(\"file://\"):]", "# url = self.root.new(rev=None) # assert url.rev == None # assert url.strpath ==", "py.path.local.sysfind('svn') if svnbin is None: py.test.skip(\"svn binary not found\") return svnbin # make", "url.info() # assert res.size > len(\"samplefile\") and res.created_rev == 1155 # the following", "path class def test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev = repocache.get(self.root.join('test').strpath)", "# url = self.root.__class__(self.rooturl, rev=1155) # url = url.join(\"samplefile\") # res = url.info()", "= '/' + str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\" % repo) class CommonSvnTests(CommonFSTests): def setup_method(self,", "process from py.__.path.testing.fscommon import CommonFSTests, setuptestfs from py.__.path.svn import cache, svncommon mypath =", "CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX we need to test more normalizing properties url", "return py.path.svnurl(\"file://%s\" % repo) class CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn = meth.func_name for", "url.rev == None # assert url.strpath == self.root.strpath # url = self.root.new(rev=10) #", "repo = py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo = 
repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath)", "'test_status_deleted': if bn.startswith(x): self._savedrepowc = save_repowc() def teardown_method(self, meth): x = getattr(self, '_savedrepowc',", "getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if not repo.listdir(): #assert not", "repo.remove() raise if sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/') return", "url = self.root.join(\"/\") assert self.root == url #def test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199)", "\"\"\" this test runs so slow it's usually disabled \"\"\" old = cache.repositories.repos", "url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir) #def test_newrev(self):", "savedwc = wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def restore_repowc((savedrepo, savedwc)): repo, wc", "def getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if not repo.listdir(): #assert", "assert repo.check() # repositories have read only files on windows #repo.chmod(0777, rec=True) repo.remove()", "res = url.proplist() assert res['svn:eol-style'] == 'native' def test_info(self): url = self.root.join(\"samplefile\") res", "== 'native' def test_info(self): url = self.root.join(\"samplefile\") res = url.info() assert res.size >", "repocache.timeout = 0 repocache.put(self.root.strpath, self.root.rev) url, rev = repocache.get(self.root.strpath) assert rev == -1", "assert url == self.root.strpath def test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url,", "and res.created_rev == 1155 # the following tests are easier if we have", "# the following tests 
are easier if we have a path class def", "svncommon mypath = py.magic.autopath() repodump = mypath.dirpath('repotest.dump') def getsvnbin(): svnbin = py.path.local.sysfind('svn') if", "# res = url.info() # assert res.size > len(\"samplefile\") and res.created_rev == 1155", "hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX we need to test more", "repo = '/' + str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' % repo) print \"checked out new", "self.root.join(\"samplefile\") res = url.info() assert res.size > len(\"samplefile\") and res.created_rev >= 0 def", "repo print repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):]) print repo assert repo.check() # repositories have", "> len(\"samplefile\") and res.created_rev >= 0 def test_log_simple(self): url = self.root.join(\"samplefile\") logentries =", "class CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn = meth.func_name for x in 'test_remove', 'test_move',", "test runs so slow it's usually disabled \"\"\" old = cache.repositories.repos try: _repocache.clear()", "= self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl, rev=500)", "== 1155 # the following tests are easier if we have a path", "mypath = py.magic.autopath() repodump = mypath.dirpath('repotest.dump') def getsvnbin(): svnbin = py.path.local.sysfind('svn') if svnbin", "given root url # cache previously obtained wcs! 
# def getrepowc(reponame='basetestrepo', wcname='wc'): repo", "repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):]) print repo assert repo.check() # repositories have read only", "is None: py.test.skip(\"svn binary not found\") return svnbin # make a wc directory", "% svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q \"%s\" <\"%s\"' % (svncommon._escape_helper(repo), repodump)) print \"created svn", "create %s' % repo) except: repo.remove() raise if sys.platform == 'win32': repo =", "# assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir)", "def _test_getreporev(self): \"\"\" this test runs so slow it's usually disabled \"\"\" old", "assert res['svn:eol-style'] == 'native' def test_info(self): url = self.root.join(\"samplefile\") res = url.info() assert", "print \"created svn repository\", repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if py.std.sys.platform == 'win32':", "self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath) assert rev>=0 assert url == svnrepourl finally: repositories.repos", "assert self.root == url #def test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199) # assert newpath", "following tests are easier if we have a path class def test_repocache_simple(self): repocache", "tests are easier if we have a path class def test_repocache_simple(self): repocache =", "= url.log() for logentry in logentries: assert logentry.rev == 1 assert hasattr(logentry, 'author')", "# assert newpath != self.root def test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self): # url", "1155 # the following tests are easier if we have a path class", "logentry in logentries: assert logentry.rev == 1 assert hasattr(logentry, 'author') assert hasattr(logentry, 'date')", "url == self.root.strpath def _test_getreporev(self): \"\"\" this test runs so slow it's usually", 
"_test_getreporev(self): \"\"\" this test runs so slow it's usually disabled \"\"\" old =", "= '/' + str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' % repo) print \"checked out new repo", "files on windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an empty", "url.listdir) #def test_newrev(self): # url = self.root.new(rev=None) # assert url.rev == None #", "= py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo = repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return", "savedwc)): repo, wc = getrepowc() print repo print repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):]) print", "have read only files on windows #repo.chmod(0777, rec=True) repo.remove() wc.localpath.remove() savedrepo.move(repo) savedwc.localpath.move(wc.localpath) #", "url.proplist() assert res['svn:eol-style'] == 'native' def test_info(self): url = self.root.join(\"samplefile\") res = url.info()", "res.size > len(\"samplefile\") and res.created_rev == 1155 # the following tests are easier", "assert rev == -1 assert url == self.root.strpath def test_repocache_outdated(self): repocache = cache.RepoCache()", "None # assert url.strpath == self.root.strpath # url = self.root.new(rev=10) # assert url.rev", "if sys.platform == 'win32': repo = '/' + str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\" %", "res.size > len(\"samplefile\") and res.created_rev >= 0 def test_log_simple(self): url = self.root.join(\"samplefile\") logentries", "rev=500) # raises(py.error.ENOENT, url.listdir) #def test_newrev(self): # url = self.root.new(rev=None) # assert url.rev", "None: restore_repowc(x) del self._savedrepowc def test_propget(self): url = self.root.join(\"samplefile\") value = url.propget('svn:eol-style') assert", "py.test.skip(\"svn binary not found\") return svnbin # make a wc directory out of", 
"self.root == url #def test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199) # assert newpath !=", "repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s' % repo) except: repo.remove() raise if", "bn = meth.func_name for x in 'test_remove', 'test_move', 'test_status_deleted': if bn.startswith(x): self._savedrepowc =", "py.path.local(repo[len(\"file://\"):]) print repo assert repo.check() # repositories have read only files on windows", "#def test_newrev(self): # url = self.root.new(rev=None) # assert url.rev == None # assert", "= url.info() assert res.size > len(\"samplefile\") and res.created_rev >= 0 def test_log_simple(self): url", "newpath = self.root.new(rev=1199) # assert newpath != self.root def test_exists_svn_root(self): assert self.root.check() #def", "# url = self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url =", "repo, wc = getrepowc() repo = py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo = repo.dirpath('repo_save') savedwc", "rev = repocache.get(self.root.join('test').strpath) assert rev == 42 assert url == self.root.strpath def test_repocache_notimeout(self):", "and return the url to it def make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin", "path, test, process from py.__.path.testing.fscommon import CommonFSTests, setuptestfs from py.__.path.svn import cache, svncommon", "meth): bn = meth.func_name for x in 'test_remove', 'test_move', 'test_status_deleted': if bn.startswith(x): self._savedrepowc", "= repocache.get(self.root.strpath) assert rev == -1 assert url == self.root.strpath def test_repocache_outdated(self): repocache", "url #def test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199) # assert newpath != self.root def", "print \"using repository at\", repo wc = py.path.svnwc(wcdir) return (\"file://%s\" % repo, wc)", "found\") 
return svnbin # make a wc directory out of a given root", "easier if we have a path class def test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath,", "\"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q \"%s\" <\"%s\"' % (svncommon._escape_helper(repo), repodump)) print \"created", "usually disabled \"\"\" old = cache.repositories.repos try: _repocache.clear() root = self.root.new(rev=-1) url, rev", "def restore_repowc((savedrepo, savedwc)): repo, wc = getrepowc() print repo print repo[len(\"file://\"):] repo =", "repository for testing purposes and return the url to it def make_test_repo(name=\"test-repository\"): repo", "% (svncommon._escape_helper(repo), repodump)) print \"created svn repository\", repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if", "wc = py.path.svnwc(wcdir) return (\"file://%s\" % repo, wc) def save_repowc(): repo, wc =", "url = self.root.join(\"samplefile\") res = url.info() assert res.size > len(\"samplefile\") and res.created_rev >=", "self.root.join(\"samplefile\") value = url.propget('svn:eol-style') assert value == 'native' def test_proplist(self): url = self.root.join(\"samplefile\")", "save_repowc(): repo, wc = getrepowc() repo = py.path.local(repo[len(\"file://\"):]) assert repo.check() savedrepo = repo.dirpath('repo_save')", "root url # cache previously obtained wcs! 
# def getrepowc(reponame='basetestrepo', wcname='wc'): repo =", "cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url, rev = repocache.get(self.root.join('test').strpath) assert rev == -1 assert", "= cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev = repocache.get(self.root.join('test').strpath) assert rev == 42 assert", "assert rev == -1 assert url == self.root.strpath def _test_getreporev(self): \"\"\" this test", "def test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # assert", "x is not None: restore_repowc(x) del self._savedrepowc def test_propget(self): url = self.root.join(\"samplefile\") value", "def test_repocache_notimeout(self): repocache = cache.RepoCache() repocache.timeout = 0 repocache.put(self.root.strpath, self.root.rev) url, rev =", "getattr(self, '_savedrepowc', None) if x is not None: restore_repowc(x) del self._savedrepowc def test_propget(self):", "self.root.check() #def test_not_exists_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0) #def test_nonexisting_listdir_rev(self):", "sys import py from py import path, test, process from py.__.path.testing.fscommon import CommonFSTests,", "!= self.root def test_exists_svn_root(self): assert self.root.check() #def test_not_exists_rev(self): # url = self.root.__class__(self.rooturl, rev=500)", "py.test.ensuretemp(wcname) if not repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"' % svncommon._escape_helper(repo))", "= self.root.__class__(self.rooturl, rev=1155) # url = url.join(\"samplefile\") # res = url.info() # assert", "test_not_exists_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url", "= self.root.new(rev=None) # assert url.rev == None # assert url.strpath == self.root.strpath #", "% repo) class 
CommonSvnTests(CommonFSTests): def setup_method(self, meth): bn = meth.func_name for x in", "assert value == 'native' def test_proplist(self): url = self.root.join(\"samplefile\") res = url.proplist() assert", "savedwc def restore_repowc((savedrepo, savedwc)): repo, wc = getrepowc() print repo print repo[len(\"file://\"):] repo", "in logentries: assert logentry.rev == 1 assert hasattr(logentry, 'author') assert hasattr(logentry, 'date') class", "len(\"samplefile\") and res.created_rev == 1155 # the following tests are easier if we", "a wc directory out of a given root url # cache previously obtained", "bn.startswith(x): self._savedrepowc = save_repowc() def teardown_method(self, meth): x = getattr(self, '_savedrepowc', None) if", "test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev = repocache.get(self.root.join('test').strpath) assert rev ==", "if svnbin is None: py.test.skip(\"svn binary not found\") return svnbin # make a", "'test_move', 'test_status_deleted': if bn.startswith(x): self._savedrepowc = save_repowc() def teardown_method(self, meth): x = getattr(self,", "wcdir = py.test.ensuretemp(wcname) if not repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"'", "len(\"samplefile\") and res.created_rev >= 0 def test_log_simple(self): url = self.root.join(\"samplefile\") logentries = url.log()", "def make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s' % repo) except: repo.remove()", "-1 assert url == self.root.strpath def _test_getreporev(self): \"\"\" this test runs so slow", "# cache previously obtained wcs! 
# def getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir", "%s' % repo) except: repo.remove() raise if sys.platform == 'win32': repo = '/'", "repocache.get(self.root.join('test').strpath) assert rev == -1 assert url == self.root.strpath def _test_getreporev(self): \"\"\" this", "to it def make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s' % repo)", "if x is not None: restore_repowc(x) del self._savedrepowc def test_propget(self): url = self.root.join(\"samplefile\")", "py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s' % repo) except: repo.remove() raise if sys.platform ==", "the url to it def make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s'", "testing purposes and return the url to it def make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name)", "assert res.size > len(\"samplefile\") and res.created_rev == 1155 # the following tests are", "'/') wc.checkout(url='file://%s' % repo) print \"checked out new repo into\", wc else: print", "test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199) # assert newpath != self.root def test_exists_svn_root(self): assert", "not None: restore_repowc(x) del self._savedrepowc def test_propget(self): url = self.root.join(\"samplefile\") value = url.propget('svn:eol-style')", "repo, wc) def save_repowc(): repo, wc = getrepowc() repo = py.path.local(repo[len(\"file://\"):]) assert repo.check()", "= self.root.join(\"samplefile\") res = url.proplist() assert res['svn:eol-style'] == 'native' def test_info(self): url =", "test_newrev(self): # url = self.root.new(rev=None) # assert url.rev == None # assert url.strpath", "url to it def make_test_repo(name=\"test-repository\"): repo = py.test.ensuretemp(name) try: py.process.cmdexec('svnadmin create %s' %", "== url #def 
test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199) # assert newpath != self.root", "savedrepo.move(repo) savedwc.localpath.move(wc.localpath) # create an empty repository for testing purposes and return the", "repo assert repo.check() # repositories have read only files on windows #repo.chmod(0777, rec=True)", "if bn.startswith(x): self._savedrepowc = save_repowc() def teardown_method(self, meth): x = getattr(self, '_savedrepowc', None)", "logentries = url.log() for logentry in logentries: assert logentry.rev == 1 assert hasattr(logentry,", "wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q \"%s\" <\"%s\"' %", "42, timestamp=0) url, rev = repocache.get(self.root.join('test').strpath) assert rev == -1 assert url ==", "def test_propget(self): url = self.root.join(\"samplefile\") value = url.propget('svn:eol-style') assert value == 'native' def", "test more normalizing properties url = self.root.join(\"/\") assert self.root == url #def test_different_revs_compare_unequal(self):", "url.strpath == self.root.strpath # url = self.root.new(rev=10) # assert url.rev == 10 #def", "repo = '/' + str(repo).replace('\\\\', '/') return py.path.svnurl(\"file://%s\" % repo) class CommonSvnTests(CommonFSTests): def", "def test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url, rev = repocache.get(self.root.join('test').strpath) assert", "'win32': repo = '/' + str(repo).replace('\\\\', '/') wc.checkout(url='file://%s' % repo) print \"checked out", "self.root.join(\"samplefile\") logentries = url.log() for logentry in logentries: assert logentry.rev == 1 assert", "self.root.strpath def test_repocache_notimeout(self): repocache = cache.RepoCache() repocache.timeout = 0 repocache.put(self.root.strpath, self.root.rev) url, rev", "repo = py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if not 
repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1)", "# assert url.strpath == self.root.strpath # url = self.root.new(rev=10) # assert url.rev ==", "assert self.root.check() #def test_not_exists_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # assert url.check(exists=0) #def", "wcs! # def getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if not", "assert url.check(exists=0) #def test_nonexisting_listdir_rev(self): # url = self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir) #def", "url, rev = repocache.get(self.root.join('test').strpath) assert rev == 42 assert url == self.root.strpath def", "url = self.root.join(\"samplefile\") res = url.proplist() assert res['svn:eol-style'] == 'native' def test_info(self): url", "= cache.repositories.repos try: _repocache.clear() root = self.root.new(rev=-1) url, rev = cache.repocache.get(root.strpath) assert rev>=0", "import path, test, process from py.__.path.testing.fscommon import CommonFSTests, setuptestfs from py.__.path.svn import cache,", "def getsvnbin(): svnbin = py.path.local.sysfind('svn') if svnbin is None: py.test.skip(\"svn binary not found\")", "= self.root.join(\"/\") assert self.root == url #def test_different_revs_compare_unequal(self): # newpath = self.root.new(rev=1199) #", "create \"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q \"%s\" <\"%s\"' % (svncommon._escape_helper(repo), repodump)) print", "x = getattr(self, '_savedrepowc', None) if x is not None: restore_repowc(x) del self._savedrepowc", "load -q \"%s\" <\"%s\"' % (svncommon._escape_helper(repo), repodump)) print \"created svn repository\", repo wcdir.ensure(dir=1)", "assert hasattr(logentry, 'date') class CommonCommandAndBindingTests(CommonSvnTests): def test_trailing_slash_is_stripped(self): # XXX we need to test", "assert url == self.root.strpath def test_repocache_notimeout(self): repocache = 
cache.RepoCache() repocache.timeout = 0 repocache.put(self.root.strpath,", "'_savedrepowc', None) if x is not None: restore_repowc(x) del self._savedrepowc def test_propget(self): url", "= getrepowc() print repo print repo[len(\"file://\"):] repo = py.path.local(repo[len(\"file://\"):]) print repo assert repo.check()", "url # cache previously obtained wcs! # def getrepowc(reponame='basetestrepo', wcname='wc'): repo = py.test.ensuretemp(reponame)", "raises(py.error.ENOENT, url.listdir) #def test_newrev(self): # url = self.root.new(rev=None) # assert url.rev == None", "normalizing properties url = self.root.join(\"/\") assert self.root == url #def test_different_revs_compare_unequal(self): # newpath", "test_info_rev(self): # url = self.root.__class__(self.rooturl, rev=1155) # url = url.join(\"samplefile\") # res =", "x in 'test_remove', 'test_move', 'test_status_deleted': if bn.startswith(x): self._savedrepowc = save_repowc() def teardown_method(self, meth):", "restore_repowc(x) del self._savedrepowc def test_propget(self): url = self.root.join(\"samplefile\") value = url.propget('svn:eol-style') assert value", "res['svn:eol-style'] == 'native' def test_info(self): url = self.root.join(\"samplefile\") res = url.info() assert res.size", "create an empty repository for testing purposes and return the url to it", "def test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev = repocache.get(self.root.join('test').strpath) assert rev", "= py.test.ensuretemp(reponame) wcdir = py.test.ensuretemp(wcname) if not repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin", "(svncommon._escape_helper(repo), repodump)) print \"created svn repository\", repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if py.std.sys.platform", "runs so slow it's usually disabled \"\"\" old = cache.repositories.repos try: _repocache.clear() root", "test_repocache_notimeout(self): repocache = cache.RepoCache() 
repocache.timeout = 0 repocache.put(self.root.strpath, self.root.rev) url, rev = repocache.get(self.root.strpath)", "in 'test_remove', 'test_move', 'test_status_deleted': if bn.startswith(x): self._savedrepowc = save_repowc() def teardown_method(self, meth): x", "res.created_rev >= 0 def test_log_simple(self): url = self.root.join(\"samplefile\") logentries = url.log() for logentry", "have a path class def test_repocache_simple(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42) url, rev", "repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if py.std.sys.platform == 'win32': repo = '/' +", "print \"checked out new repo into\", wc else: print \"using repository at\", repo", "self.root.strpath def test_repocache_outdated(self): repocache = cache.RepoCache() repocache.put(self.root.strpath, 42, timestamp=0) url, rev = repocache.get(self.root.join('test').strpath)", "# url = self.root.__class__(self.rooturl, rev=500) # raises(py.error.ENOENT, url.listdir) #def test_newrev(self): # url =", "meth.func_name for x in 'test_remove', 'test_move', 'test_status_deleted': if bn.startswith(x): self._savedrepowc = save_repowc() def", "= self.root.new(rev=10) # assert url.rev == 10 #def test_info_rev(self): # url = self.root.__class__(self.rooturl,", "\"created svn repository\", repo wcdir.ensure(dir=1) wc = py.path.svnwc(wcdir) if py.std.sys.platform == 'win32': repo", "savedrepo = repo.dirpath('repo_save') savedwc = wc.dirpath('wc_save') repo.copy(savedrepo) wc.localpath.copy(savedwc.localpath) return savedrepo, savedwc def restore_repowc((savedrepo,", "% repo) except: repo.remove() raise if sys.platform == 'win32': repo = '/' +", "repo.listdir(): #assert not wcdir.check() repo.ensure(dir=1) py.process.cmdexec('svnadmin create \"%s\"' % svncommon._escape_helper(repo)) py.process.cmdexec('svnadmin load -q" ]
[ "y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0): bn = self.b(n_sersic) r = (x**2+y**2*q**-2)**0.5", "numpy as np class Sersic: def b(self,n): return 1.9992*n - 0.3271 + 4*(405*n)**-1", "def b(self,n): return 1.9992*n - 0.3271 + 4*(405*n)**-1 def kappa(self,x, y, n_sersic, r_eff,", "import numpy as np class Sersic: def b(self,n): return 1.9992*n - 0.3271 +", "def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0): bn = self.b(n_sersic) r", "return 1.9992*n - 0.3271 + 4*(405*n)**-1 def kappa(self,x, y, n_sersic, r_eff, k_eff, q,", "0.3271 + 4*(405*n)**-1 def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0): bn", "np class Sersic: def b(self,n): return 1.9992*n - 0.3271 + 4*(405*n)**-1 def kappa(self,x,", "4*(405*n)**-1 def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0): bn = self.b(n_sersic)", "- 0.3271 + 4*(405*n)**-1 def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0):", "as np class Sersic: def b(self,n): return 1.9992*n - 0.3271 + 4*(405*n)**-1 def", "1.9992*n - 0.3271 + 4*(405*n)**-1 def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0,", "kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0): bn = self.b(n_sersic) r =", "n_sersic, r_eff, k_eff, q, center_x=0, center_y=0): bn = self.b(n_sersic) r = (x**2+y**2*q**-2)**0.5 return", "+ 4*(405*n)**-1 def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0): bn =", "b(self,n): return 1.9992*n - 0.3271 + 4*(405*n)**-1 def kappa(self,x, y, n_sersic, r_eff, k_eff,", "Sersic: def b(self,n): return 1.9992*n - 0.3271 + 4*(405*n)**-1 def kappa(self,x, y, n_sersic,", "r_eff, k_eff, q, center_x=0, center_y=0): bn = self.b(n_sersic) r = (x**2+y**2*q**-2)**0.5 return k_eff*np.exp(-bn*((r*r_eff**-1)**(n_sersic**-1)-1))", "class Sersic: def b(self,n): return 1.9992*n - 0.3271 + 4*(405*n)**-1 def kappa(self,x, y," ]
[ "handle with client requirements. This client handler is with a strong bond to", "False self.stop = False async def commandsHandler(self ,client,path): app = None try: print(\"Client", "client.send(json.dumps({\"Command\": \"EXIT\"})) elif command == 'DATA': try: if self.initialize: data = app.get_data() res", "client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not initialized\"})) except: res = {\"Command\": \"ERROR\", \"Message\": \"App is", "{\"Command\": \"DATA\", \"Params\":[data] , \"Message\": \"\"} await client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App", "##################################### # TODO import WalabotMyApp as my_app ##################################### class WalabotHandler: def __init__(self): self.initialize", "import socket from imp import load_source import WalabotBreathing as breathing import WalabotTracker as", "and Tracker Follow TODO to add a new project ''' import asyncio import", "start() get_data() stop() Feel free to add your uwn function just note that", "except: res = {\"Command\": \"ERROR\", \"Message\": \"App is NOT defined\"} await client.send(json.dumps(res)) else:", "attached : Breathing and Tracker Follow TODO to add a new project '''", "WalabotHandler: def __init__(self): self.initialize = False self.stop = False async def commandsHandler(self ,client,path):", "is with a strong bond to NewWalabotAppTemplate.py requirements. Basic functions that must be:", "import WalabotMyApp as my_app ##################################### class WalabotHandler: def __init__(self): self.initialize = False self.stop", "requirements. This client handler is with a strong bond to NewWalabotAppTemplate.py requirements. 
Basic", "as my_app ##################################### class WalabotHandler: def __init__(self): self.initialize = False self.stop = False", "breathing elif command == \"TRACKER\": print(command) app = tracker ############################ ADD YOUR APP", "NOT defined\"} await client.send(json.dumps(res)) else: res = {\"Command\": \"ERROR\", \"Message\": \"Unknown Command\"} await", "is NOT defined\"} await client.send(json.dumps(res)) else: res = {\"Command\": \"ERROR\", \"Message\": \"Unknown Command\"}", "This is a asynchronous function to handle with client requirements. This client handler", "WalabotTracker as tracker ##################################### # TODO import WalabotMyApp as my_app ##################################### class WalabotHandler:", "that you also support client-side commands. The information is transmitted in json. The", "WalabotBreathing as breathing import WalabotTracker as tracker ##################################### # TODO import WalabotMyApp as", "tracker ##################################### # TODO import WalabotMyApp as my_app ##################################### class WalabotHandler: def __init__(self):", "strong bond to NewWalabotAppTemplate.py requirements. Basic functions that must be: start() get_data() stop()", "if not self.initialize: self.initialize = True app.start() elif command == 'STOP': print(command) app.stop()", "as e: print(\"Connection problem\" + str(e)) res = {\"Command\": \"ERROR\", \"Message\": str(e)} await", "handler is with a strong bond to NewWalabotAppTemplate.py requirements. Basic functions that must", "app = tracker ############################ ADD YOUR APP HERE ################################## # TODO elif command", "# TODO import WalabotMyApp as my_app ##################################### class WalabotHandler: def __init__(self): self.initialize =", "<reponame>Walabot-Projects/Walabot-WebSocketServer ''' This is a asynchronous function to handle with client requirements. 
This", "try: print(\"Client connected..\") print(str(self.stop)) while not self.stop: data = json.loads(await client.recv()) command =", "command = data['Command'] if command == 'BREATHING': print(command) app = breathing elif command", "await client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not initialized\"})) except: res = {\"Command\":", "client.send(json.dumps(res)) except Exception as e: print(\"Connection problem\" + str(e)) res = {\"Command\": \"ERROR\",", "# app = my_app ################################################################################## if not self.initialize: self.initialize = True app.start() elif", "await client.send(json.dumps(res)) else: res = {\"Command\": \"ERROR\", \"Message\": \"Unknown Command\"} await client.send(json.dumps(res)) except", "elif command == \"MY_APP\": # print(command) # app = my_app ################################################################################## if not", "add your uwn function just note that you also support client-side commands. 
The", "{\"Command\": \"ERROR\", \"Message\": \"Unknown Command\"} await client.send(json.dumps(res)) except Exception as e: print(\"Connection problem\"", "class WalabotHandler: def __init__(self): self.initialize = False self.stop = False async def commandsHandler(self", "not initialized\"})) except: res = {\"Command\": \"ERROR\", \"Message\": \"App is NOT defined\"} await", "app.get_data() res = {\"Command\": \"DATA\", \"Params\":[data] , \"Message\": \"\"} await client.send(json.dumps(res)) else: await", "except Exception as e: print(\"Connection problem\" + str(e)) res = {\"Command\": \"ERROR\", \"Message\":", "print(command) app.stop() self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"})) elif command == 'DATA': try: if self.initialize:", "must be: start() get_data() stop() Feel free to add your uwn function just", "= {\"Command\": \"ERROR\", \"Message\": \"Unknown Command\"} await client.send(json.dumps(res)) except Exception as e: print(\"Connection", "False async def commandsHandler(self ,client,path): app = None try: print(\"Client connected..\") print(str(self.stop)) while", "import load_source import WalabotBreathing as breathing import WalabotTracker as tracker ##################################### # TODO", "new project ''' import asyncio import websockets import json import socket from imp", "be: start() get_data() stop() Feel free to add your uwn function just note", "def commandsHandler(self ,client,path): app = None try: print(\"Client connected..\") print(str(self.stop)) while not self.stop:", "'DATA': try: if self.initialize: data = app.get_data() res = {\"Command\": \"DATA\", \"Params\":[data] ,", "\"Unknown Command\"} await client.send(json.dumps(res)) except Exception as e: print(\"Connection problem\" + str(e)) res", "{\"Command\": \"\", \"Message\": \"\", Params\":[data]} Two examples are attached : Breathing and Tracker", "tracker ############################ ADD YOUR APP HERE ################################## # TODO 
elif command == \"MY_APP\":", "app = my_app ################################################################################## if not self.initialize: self.initialize = True app.start() elif command", "self.initialize: self.initialize = True app.start() elif command == 'STOP': print(command) app.stop() self.initialize=False await", "The information is transmitted in json. The protocol: {\"Command\": \"\", \"Message\": \"\", Params\":[data]}", "requirements. Basic functions that must be: start() get_data() stop() Feel free to add", "to add a new project ''' import asyncio import websockets import json import", "your uwn function just note that you also support client-side commands. The information", "\"ERROR\", \"Message\": \"App is NOT defined\"} await client.send(json.dumps(res)) else: res = {\"Command\": \"ERROR\",", "== 'STOP': print(command) app.stop() self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"})) elif command == 'DATA': try:", ": Breathing and Tracker Follow TODO to add a new project ''' import", "ADD YOUR APP HERE ################################## # TODO elif command == \"MY_APP\": # print(command)", "client.send(json.dumps(res)) else: res = {\"Command\": \"ERROR\", \"Message\": \"Unknown Command\"} await client.send(json.dumps(res)) except Exception", "with a strong bond to NewWalabotAppTemplate.py requirements. Basic functions that must be: start()", "to NewWalabotAppTemplate.py requirements. Basic functions that must be: start() get_data() stop() Feel free", "if self.initialize: data = app.get_data() res = {\"Command\": \"DATA\", \"Params\":[data] , \"Message\": \"\"}", "The protocol: {\"Command\": \"\", \"Message\": \"\", Params\":[data]} Two examples are attached : Breathing", "examples are attached : Breathing and Tracker Follow TODO to add a new", "client requirements. 
This client handler is with a strong bond to NewWalabotAppTemplate.py requirements.", "\"\", \"Message\": \"\", Params\":[data]} Two examples are attached : Breathing and Tracker Follow", "asyncio import websockets import json import socket from imp import load_source import WalabotBreathing", "import websockets import json import socket from imp import load_source import WalabotBreathing as", "= False async def commandsHandler(self ,client,path): app = None try: print(\"Client connected..\") print(str(self.stop))", "\"App is NOT defined\"} await client.send(json.dumps(res)) else: res = {\"Command\": \"ERROR\", \"Message\": \"Unknown", "res = {\"Command\": \"DATA\", \"Params\":[data] , \"Message\": \"\"} await client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\":", "async def commandsHandler(self ,client,path): app = None try: print(\"Client connected..\") print(str(self.stop)) while not", "command == \"TRACKER\": print(command) app = tracker ############################ ADD YOUR APP HERE ##################################", "print(command) # app = my_app ################################################################################## if not self.initialize: self.initialize = True app.start()", "elif command == 'STOP': print(command) app.stop() self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"})) elif command ==", "commandsHandler(self ,client,path): app = None try: print(\"Client connected..\") print(str(self.stop)) while not self.stop: data", "Two examples are attached : Breathing and Tracker Follow TODO to add a", "NewWalabotAppTemplate.py requirements. 
Basic functions that must be: start() get_data() stop() Feel free to", "= data['Command'] if command == 'BREATHING': print(command) app = breathing elif command ==", "not self.stop: data = json.loads(await client.recv()) command = data['Command'] if command == 'BREATHING':", "elif command == \"TRACKER\": print(command) app = tracker ############################ ADD YOUR APP HERE", "import WalabotTracker as tracker ##################################### # TODO import WalabotMyApp as my_app ##################################### class", "# TODO elif command == \"MY_APP\": # print(command) # app = my_app ##################################################################################", "e: print(\"Connection problem\" + str(e)) res = {\"Command\": \"ERROR\", \"Message\": str(e)} await client.send(json.dumps(res))", "just note that you also support client-side commands. The information is transmitted in", "load_source import WalabotBreathing as breathing import WalabotTracker as tracker ##################################### # TODO import", "None try: print(\"Client connected..\") print(str(self.stop)) while not self.stop: data = json.loads(await client.recv()) command", "self.stop: data = json.loads(await client.recv()) command = data['Command'] if command == 'BREATHING': print(command)", "await client.send(json.dumps({\"Command\": \"EXIT\"})) elif command == 'DATA': try: if self.initialize: data = app.get_data()", "to handle with client requirements. 
This client handler is with a strong bond", "else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not initialized\"})) except: res = {\"Command\": \"ERROR\", \"Message\":", "app = None try: print(\"Client connected..\") print(str(self.stop)) while not self.stop: data = json.loads(await", "are attached : Breathing and Tracker Follow TODO to add a new project", "my_app ##################################### class WalabotHandler: def __init__(self): self.initialize = False self.stop = False async", "is transmitted in json. The protocol: {\"Command\": \"\", \"Message\": \"\", Params\":[data]} Two examples", "connected..\") print(str(self.stop)) while not self.stop: data = json.loads(await client.recv()) command = data['Command'] if", "print(str(self.stop)) while not self.stop: data = json.loads(await client.recv()) command = data['Command'] if command", "= {\"Command\": \"DATA\", \"Params\":[data] , \"Message\": \"\"} await client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\":", "\"Params\":[data] , \"Message\": \"\"} await client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not initialized\"}))", "app = breathing elif command == \"TRACKER\": print(command) app = tracker ############################ ADD", "\"Message\": \"App is NOT defined\"} await client.send(json.dumps(res)) else: res = {\"Command\": \"ERROR\", \"Message\":", "Command\"} await client.send(json.dumps(res)) except Exception as e: print(\"Connection problem\" + str(e)) res =", ",client,path): app = None try: print(\"Client connected..\") print(str(self.stop)) while not self.stop: data =", "get_data() stop() Feel free to add your uwn function just note that you", "support client-side commands. The information is transmitted in json. 
The protocol: {\"Command\": \"\",", "self.initialize = False self.stop = False async def commandsHandler(self ,client,path): app = None", "client.recv()) command = data['Command'] if command == 'BREATHING': print(command) app = breathing elif", "websockets import json import socket from imp import load_source import WalabotBreathing as breathing", "== \"MY_APP\": # print(command) # app = my_app ################################################################################## if not self.initialize: self.initialize", "self.initialize = True app.start() elif command == 'STOP': print(command) app.stop() self.initialize=False await client.send(json.dumps({\"Command\":", "== 'DATA': try: if self.initialize: data = app.get_data() res = {\"Command\": \"DATA\", \"Params\":[data]", "print(command) app = breathing elif command == \"TRACKER\": print(command) app = tracker ############################", "client handler is with a strong bond to NewWalabotAppTemplate.py requirements. Basic functions that", "HERE ################################## # TODO elif command == \"MY_APP\": # print(command) # app =", "TODO to add a new project ''' import asyncio import websockets import json", "= True app.start() elif command == 'STOP': print(command) app.stop() self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"}))", "defined\"} await client.send(json.dumps(res)) else: res = {\"Command\": \"ERROR\", \"Message\": \"Unknown Command\"} await client.send(json.dumps(res))", "\"\", Params\":[data]} Two examples are attached : Breathing and Tracker Follow TODO to", "# print(command) # app = my_app ################################################################################## if not self.initialize: self.initialize = True", "import asyncio import websockets import json import socket from imp import load_source import", "command == 'STOP': print(command) app.stop() self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"})) elif command == 'DATA':", 
"''' import asyncio import websockets import json import socket from imp import load_source", "functions that must be: start() get_data() stop() Feel free to add your uwn", "res = {\"Command\": \"ERROR\", \"Message\": \"App is NOT defined\"} await client.send(json.dumps(res)) else: res", "== 'BREATHING': print(command) app = breathing elif command == \"TRACKER\": print(command) app =", "a new project ''' import asyncio import websockets import json import socket from", "self.stop = False async def commandsHandler(self ,client,path): app = None try: print(\"Client connected..\")", "transmitted in json. The protocol: {\"Command\": \"\", \"Message\": \"\", Params\":[data]} Two examples are", "Exception as e: print(\"Connection problem\" + str(e)) res = {\"Command\": \"ERROR\", \"Message\": str(e)}", "Params\":[data]} Two examples are attached : Breathing and Tracker Follow TODO to add", "= json.loads(await client.recv()) command = data['Command'] if command == 'BREATHING': print(command) app =", "self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"})) elif command == 'DATA': try: if self.initialize: data =", "\"EXIT\"})) elif command == 'DATA': try: if self.initialize: data = app.get_data() res =", "'BREATHING': print(command) app = breathing elif command == \"TRACKER\": print(command) app = tracker", "TODO import WalabotMyApp as my_app ##################################### class WalabotHandler: def __init__(self): self.initialize = False", "print(command) app = tracker ############################ ADD YOUR APP HERE ################################## # TODO elif", "Follow TODO to add a new project ''' import asyncio import websockets import", "= False self.stop = False async def commandsHandler(self ,client,path): app = None try:", "print(\"Client connected..\") print(str(self.stop)) while not self.stop: data = json.loads(await client.recv()) command = data['Command']", "##################################### class WalabotHandler: def 
__init__(self): self.initialize = False self.stop = False async def", "'STOP': print(command) app.stop() self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"})) elif command == 'DATA': try: if", "json import socket from imp import load_source import WalabotBreathing as breathing import WalabotTracker", "command == 'DATA': try: if self.initialize: data = app.get_data() res = {\"Command\": \"DATA\",", "else: res = {\"Command\": \"ERROR\", \"Message\": \"Unknown Command\"} await client.send(json.dumps(res)) except Exception as", "try: if self.initialize: data = app.get_data() res = {\"Command\": \"DATA\", \"Params\":[data] , \"Message\":", "{\"Command\": \"ERROR\", \"Message\": \"App is NOT defined\"} await client.send(json.dumps(res)) else: res = {\"Command\":", "await client.send(json.dumps(res)) except Exception as e: print(\"Connection problem\" + str(e)) res = {\"Command\":", "__init__(self): self.initialize = False self.stop = False async def commandsHandler(self ,client,path): app =", "asynchronous function to handle with client requirements. This client handler is with a", "stop() Feel free to add your uwn function just note that you also", "= app.get_data() res = {\"Command\": \"DATA\", \"Params\":[data] , \"Message\": \"\"} await client.send(json.dumps(res)) else:", "information is transmitted in json. The protocol: {\"Command\": \"\", \"Message\": \"\", Params\":[data]} Two", "Tracker Follow TODO to add a new project ''' import asyncio import websockets", "note that you also support client-side commands. 
The information is transmitted in json.", "import json import socket from imp import load_source import WalabotBreathing as breathing import", "initialized\"})) except: res = {\"Command\": \"ERROR\", \"Message\": \"App is NOT defined\"} await client.send(json.dumps(res))", "\"TRACKER\": print(command) app = tracker ############################ ADD YOUR APP HERE ################################## # TODO", "import WalabotBreathing as breathing import WalabotTracker as tracker ##################################### # TODO import WalabotMyApp", "project ''' import asyncio import websockets import json import socket from imp import", "imp import load_source import WalabotBreathing as breathing import WalabotTracker as tracker ##################################### #", "YOUR APP HERE ################################## # TODO elif command == \"MY_APP\": # print(command) #", "you also support client-side commands. The information is transmitted in json. The protocol:", "\"DATA\", \"Params\":[data] , \"Message\": \"\"} await client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not", "= {\"Command\": \"ERROR\", \"Message\": \"App is NOT defined\"} await client.send(json.dumps(res)) else: res =", "\"EXIT\",\"Message\": \"App not initialized\"})) except: res = {\"Command\": \"ERROR\", \"Message\": \"App is NOT", "bond to NewWalabotAppTemplate.py requirements. Basic functions that must be: start() get_data() stop() Feel", ", \"Message\": \"\"} await client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not initialized\"})) except:", "################################## # TODO elif command == \"MY_APP\": # print(command) # app = my_app", "data['Command'] if command == 'BREATHING': print(command) app = breathing elif command == \"TRACKER\":", "This client handler is with a strong bond to NewWalabotAppTemplate.py requirements. 
Basic functions", "that must be: start() get_data() stop() Feel free to add your uwn function", "in json. The protocol: {\"Command\": \"\", \"Message\": \"\", Params\":[data]} Two examples are attached", "protocol: {\"Command\": \"\", \"Message\": \"\", Params\":[data]} Two examples are attached : Breathing and", "uwn function just note that you also support client-side commands. The information is", "await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not initialized\"})) except: res = {\"Command\": \"ERROR\", \"Message\": \"App", "WalabotMyApp as my_app ##################################### class WalabotHandler: def __init__(self): self.initialize = False self.stop =", "function just note that you also support client-side commands. The information is transmitted", "############################ ADD YOUR APP HERE ################################## # TODO elif command == \"MY_APP\": #", "TODO elif command == \"MY_APP\": # print(command) # app = my_app ################################################################################## if", "app.start() elif command == 'STOP': print(command) app.stop() self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"})) elif command", "a asynchronous function to handle with client requirements. This client handler is with", "if command == 'BREATHING': print(command) app = breathing elif command == \"TRACKER\": print(command)", "also support client-side commands. The information is transmitted in json. The protocol: {\"Command\":", "from imp import load_source import WalabotBreathing as breathing import WalabotTracker as tracker #####################################", "= my_app ################################################################################## if not self.initialize: self.initialize = True app.start() elif command ==", "APP HERE ################################## # TODO elif command == \"MY_APP\": # print(command) # app", "a strong bond to NewWalabotAppTemplate.py requirements. 
Basic functions that must be: start() get_data()", "Breathing and Tracker Follow TODO to add a new project ''' import asyncio", "with client requirements. This client handler is with a strong bond to NewWalabotAppTemplate.py", "commands. The information is transmitted in json. The protocol: {\"Command\": \"\", \"Message\": \"\",", "== \"TRACKER\": print(command) app = tracker ############################ ADD YOUR APP HERE ################################## #", "elif command == 'DATA': try: if self.initialize: data = app.get_data() res = {\"Command\":", "\"Message\": \"Unknown Command\"} await client.send(json.dumps(res)) except Exception as e: print(\"Connection problem\" + str(e))", "function to handle with client requirements. This client handler is with a strong", "= breathing elif command == \"TRACKER\": print(command) app = tracker ############################ ADD YOUR", "def __init__(self): self.initialize = False self.stop = False async def commandsHandler(self ,client,path): app", "= None try: print(\"Client connected..\") print(str(self.stop)) while not self.stop: data = json.loads(await client.recv())", "json. 
The protocol: {\"Command\": \"\", \"Message\": \"\", Params\":[data]} Two examples are attached :", "################################################################################## if not self.initialize: self.initialize = True app.start() elif command == 'STOP': print(command)", "command == \"MY_APP\": # print(command) # app = my_app ################################################################################## if not self.initialize:", "socket from imp import load_source import WalabotBreathing as breathing import WalabotTracker as tracker", "True app.start() elif command == 'STOP': print(command) app.stop() self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"})) elif", "as tracker ##################################### # TODO import WalabotMyApp as my_app ##################################### class WalabotHandler: def", "\"Message\": \"\", Params\":[data]} Two examples are attached : Breathing and Tracker Follow TODO", "\"Message\": \"\"} await client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not initialized\"})) except: res", "client-side commands. The information is transmitted in json. The protocol: {\"Command\": \"\", \"Message\":", "breathing import WalabotTracker as tracker ##################################### # TODO import WalabotMyApp as my_app #####################################", "while not self.stop: data = json.loads(await client.recv()) command = data['Command'] if command ==", "''' This is a asynchronous function to handle with client requirements. 
This client", "self.initialize: data = app.get_data() res = {\"Command\": \"DATA\", \"Params\":[data] , \"Message\": \"\"} await", "add a new project ''' import asyncio import websockets import json import socket", "not self.initialize: self.initialize = True app.start() elif command == 'STOP': print(command) app.stop() self.initialize=False", "\"MY_APP\": # print(command) # app = my_app ################################################################################## if not self.initialize: self.initialize =", "Basic functions that must be: start() get_data() stop() Feel free to add your", "Feel free to add your uwn function just note that you also support", "free to add your uwn function just note that you also support client-side", "\"App not initialized\"})) except: res = {\"Command\": \"ERROR\", \"Message\": \"App is NOT defined\"}", "to add your uwn function just note that you also support client-side commands.", "data = app.get_data() res = {\"Command\": \"DATA\", \"Params\":[data] , \"Message\": \"\"} await client.send(json.dumps(res))", "client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not initialized\"})) except: res = {\"Command\": \"ERROR\",", "data = json.loads(await client.recv()) command = data['Command'] if command == 'BREATHING': print(command) app", "\"\"} await client.send(json.dumps(res)) else: await client.send(json.dumps({\"Command\": \"EXIT\",\"Message\": \"App not initialized\"})) except: res =", "\"ERROR\", \"Message\": \"Unknown Command\"} await client.send(json.dumps(res)) except Exception as e: print(\"Connection problem\" +", "json.loads(await client.recv()) command = data['Command'] if command == 'BREATHING': print(command) app = breathing", "= tracker ############################ ADD YOUR APP HERE ################################## # TODO elif command ==", "res = {\"Command\": \"ERROR\", \"Message\": \"Unknown Command\"} await client.send(json.dumps(res)) except Exception as 
e:", "my_app ################################################################################## if not self.initialize: self.initialize = True app.start() elif command == 'STOP':", "command == 'BREATHING': print(command) app = breathing elif command == \"TRACKER\": print(command) app", "app.stop() self.initialize=False await client.send(json.dumps({\"Command\": \"EXIT\"})) elif command == 'DATA': try: if self.initialize: data", "is a asynchronous function to handle with client requirements. This client handler is", "as breathing import WalabotTracker as tracker ##################################### # TODO import WalabotMyApp as my_app" ]
[ "return self._get_config(key) def _create_files(self, files, dir, filter, default, **kwargs): self._files = {} files", "collections from attrdict import AttrDict from app_settings import file_search from app_settings import FileFactory", "\"_\", filename) def _validate(self, files, dir, resolve_type): if not files and not dir:", "in self._files: self._save_config(config_name) def save_all(self): for _name in self._files: self.save(_name) @property def files(self):", "list(self._files.keys()) def __getitem__(self, key): return self._get_config(key) def _create_files(self, files, dir, filter, default, **kwargs):", "_transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\", \"_\", filename) def _validate(self, files, dir, resolve_type): if not", "default=None, filter=None, **kwargs): self._validate(files, dir, default) self._create_files(files, dir, filter, default, **kwargs) self._load_files() def", "filter, default, **kwargs) self._load_files() def save(self, config_name): if config_name in self._files: self._save_config(config_name) def", "self._save_config(config_name) def save_all(self): for _name in self._files: self.save(_name) @property def files(self): return list(self._files.keys())", "= self._transform_invalid_name(_file.name) self._files[_name] = _file def _get_files(self, files, dir, filter): if isinstance(files, str):", "_create_files(self, files, dir, filter, default, **kwargs): self._files = {} files = self._get_files(files, dir,", "recursive=True) return [] def _load_files(self): for _name, _file in self._files.items(): self._add_config(_name, _file.load()) def", "from app_settings import FileFactory __all__ = [\"Config\"] class Config(object): def __init__(self, files=None, dir=None,", "files, dir, filter, default, **kwargs): self._files = {} files = self._get_files(files, dir, filter)", "f in files: _file = FileFactory.create(f, default, **kwargs) _name = self._transform_invalid_name(_file.name) self._files[_name] =", "self._files: 
self._save_config(config_name) def save_all(self): for _name in self._files: self.save(_name) @property def files(self): return", "getattr(self, config_name) def _add_config(self, config_name, config): setattr(self, config_name, AttrDict(config)) def _save_config(self, name): config_dict", "not files and not dir: raise ValueError(\"No files or search directory provided.\") if", "save_all(self): for _name in self._files: self.save(_name) @property def files(self): return list(self._files.keys()) def __getitem__(self,", "self._load_files() def save(self, config_name): if config_name in self._files: self._save_config(config_name) def save_all(self): for _name", "AttrDict from app_settings import file_search from app_settings import FileFactory __all__ = [\"Config\"] class", "self._files: self.save(_name) @property def files(self): return list(self._files.keys()) def __getitem__(self, key): return self._get_config(key) def", "def _validate(self, files, dir, resolve_type): if not files and not dir: raise ValueError(\"No", "_file.load()) def _get_config(self, config_name): return getattr(self, config_name) def _add_config(self, config_name, config): setattr(self, config_name,", "def save_all(self): for _name in self._files: self.save(_name) @property def files(self): return list(self._files.keys()) def", "return [] def _load_files(self): for _name, _file in self._files.items(): self._add_config(_name, _file.load()) def _get_config(self,", "return [files] if isinstance(files, collections.Iterable): return files if dir: return file_search(dir, filter, recursive=True)", "_save_config(self, name): config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\", \"_\", filename)", "filter=None, **kwargs): self._validate(files, dir, default) self._create_files(files, dir, filter, default, **kwargs) self._load_files() def save(self,", "return file_search(dir, filter, recursive=True) return 
[] def _load_files(self): for _name, _file in self._files.items():", "__init__(self, files=None, dir=None, default=None, filter=None, **kwargs): self._validate(files, dir, default) self._create_files(files, dir, filter, default,", "for _name, _file in self._files.items(): self._add_config(_name, _file.load()) def _get_config(self, config_name): return getattr(self, config_name)", "app_settings import file_search from app_settings import FileFactory __all__ = [\"Config\"] class Config(object): def", "attrdict import AttrDict from app_settings import file_search from app_settings import FileFactory __all__ =", "_add_config(self, config_name, config): setattr(self, config_name, AttrDict(config)) def _save_config(self, name): config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict)", "= _file def _get_files(self, files, dir, filter): if isinstance(files, str): return [files] if", "provided.\") if files: if isinstance(files, collections.Iterable): for f in files: assert isinstance(f, str)", "**kwargs): self._validate(files, dir, default) self._create_files(files, dir, filter, default, **kwargs) self._load_files() def save(self, config_name):", "def _transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\", \"_\", filename) def _validate(self, files, dir, resolve_type): if", "filename) def _validate(self, files, dir, resolve_type): if not files and not dir: raise", "config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\", \"_\", filename) def _validate(self,", "dir: raise ValueError(\"No files or search directory provided.\") if files: if isinstance(files, collections.Iterable):", "collections.Iterable): for f in files: assert isinstance(f, str) else: assert isinstance(files, str) if", "_validate(self, files, dir, resolve_type): if not files and not dir: raise ValueError(\"No files", "if isinstance(files, str): return [files] if 
isinstance(files, collections.Iterable): return files if dir: return", "def _add_config(self, config_name, config): setattr(self, config_name, AttrDict(config)) def _save_config(self, name): config_dict = dict(self._get_config(name))", "files: _file = FileFactory.create(f, default, **kwargs) _name = self._transform_invalid_name(_file.name) self._files[_name] = _file def", "files if dir: return file_search(dir, filter, recursive=True) return [] def _load_files(self): for _name,", "return files if dir: return file_search(dir, filter, recursive=True) return [] def _load_files(self): for", "import FileFactory __all__ = [\"Config\"] class Config(object): def __init__(self, files=None, dir=None, default=None, filter=None,", "return re.sub(r\"[^A-Za-z]\", \"_\", filename) def _validate(self, files, dir, resolve_type): if not files and", "isinstance(files, collections.Iterable): return files if dir: return file_search(dir, filter, recursive=True) return [] def", "__all__ = [\"Config\"] class Config(object): def __init__(self, files=None, dir=None, default=None, filter=None, **kwargs): self._validate(files,", "if files: if isinstance(files, collections.Iterable): for f in files: assert isinstance(f, str) else:", "_name = self._transform_invalid_name(_file.name) self._files[_name] = _file def _get_files(self, files, dir, filter): if isinstance(files,", "self._validate(files, dir, default) self._create_files(files, dir, filter, default, **kwargs) self._load_files() def save(self, config_name): if", "isinstance(files, collections.Iterable): for f in files: assert isinstance(f, str) else: assert isinstance(files, str)", "in files: _file = FileFactory.create(f, default, **kwargs) _name = self._transform_invalid_name(_file.name) self._files[_name] = _file", "os, re, collections from attrdict import AttrDict from app_settings import file_search from app_settings", "directory provided.\") if files: if isinstance(files, collections.Iterable): for f in files: assert isinstance(f,", 
"**kwargs): self._files = {} files = self._get_files(files, dir, filter) for f in files:", "[] def _load_files(self): for _name, _file in self._files.items(): self._add_config(_name, _file.load()) def _get_config(self, config_name):", "filename): return re.sub(r\"[^A-Za-z]\", \"_\", filename) def _validate(self, files, dir, resolve_type): if not files", "from app_settings import file_search from app_settings import FileFactory __all__ = [\"Config\"] class Config(object):", "search directory provided.\") if files: if isinstance(files, collections.Iterable): for f in files: assert", "if dir: return file_search(dir, filter, recursive=True) return [] def _load_files(self): for _name, _file", "files: assert isinstance(f, str) else: assert isinstance(files, str) if dir: assert isinstance(dir, str)", "default, **kwargs) _name = self._transform_invalid_name(_file.name) self._files[_name] = _file def _get_files(self, files, dir, filter):", "in self._files: self.save(_name) @property def files(self): return list(self._files.keys()) def __getitem__(self, key): return self._get_config(key)", "f in files: assert isinstance(f, str) else: assert isinstance(files, str) if dir: assert", "return list(self._files.keys()) def __getitem__(self, key): return self._get_config(key) def _create_files(self, files, dir, filter, default,", "**kwargs) _name = self._transform_invalid_name(_file.name) self._files[_name] = _file def _get_files(self, files, dir, filter): if", "from attrdict import AttrDict from app_settings import file_search from app_settings import FileFactory __all__", "config_name): if config_name in self._files: self._save_config(config_name) def save_all(self): for _name in self._files: self.save(_name)", "isinstance(f, str) else: assert isinstance(files, str) if dir: assert isinstance(dir, str) assert os.path.isdir(dir)", "FileFactory.create(f, default, **kwargs) _name = self._transform_invalid_name(_file.name) self._files[_name] = _file def _get_files(self, files, dir,", 
"dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\", \"_\", filename) def _validate(self, files, dir,", "config_name): return getattr(self, config_name) def _add_config(self, config_name, config): setattr(self, config_name, AttrDict(config)) def _save_config(self,", "_load_files(self): for _name, _file in self._files.items(): self._add_config(_name, _file.load()) def _get_config(self, config_name): return getattr(self,", "default, **kwargs): self._files = {} files = self._get_files(files, dir, filter) for f in", "__getitem__(self, key): return self._get_config(key) def _create_files(self, files, dir, filter, default, **kwargs): self._files =", "def __init__(self, files=None, dir=None, default=None, filter=None, **kwargs): self._validate(files, dir, default) self._create_files(files, dir, filter,", "name): config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\", \"_\", filename) def", "@property def files(self): return list(self._files.keys()) def __getitem__(self, key): return self._get_config(key) def _create_files(self, files,", "filter) for f in files: _file = FileFactory.create(f, default, **kwargs) _name = self._transform_invalid_name(_file.name)", "files=None, dir=None, default=None, filter=None, **kwargs): self._validate(files, dir, default) self._create_files(files, dir, filter, default, **kwargs)", "if not files and not dir: raise ValueError(\"No files or search directory provided.\")", "import os, re, collections from attrdict import AttrDict from app_settings import file_search from", "and not dir: raise ValueError(\"No files or search directory provided.\") if files: if", "_get_files(self, files, dir, filter): if isinstance(files, str): return [files] if isinstance(files, collections.Iterable): return", "self._add_config(_name, _file.load()) def _get_config(self, 
config_name): return getattr(self, config_name) def _add_config(self, config_name, config): setattr(self,", "assert isinstance(f, str) else: assert isinstance(files, str) if dir: assert isinstance(dir, str) assert", "def _load_files(self): for _name, _file in self._files.items(): self._add_config(_name, _file.load()) def _get_config(self, config_name): return", "files, dir, resolve_type): if not files and not dir: raise ValueError(\"No files or", "import AttrDict from app_settings import file_search from app_settings import FileFactory __all__ = [\"Config\"]", "= [\"Config\"] class Config(object): def __init__(self, files=None, dir=None, default=None, filter=None, **kwargs): self._validate(files, dir,", "if config_name in self._files: self._save_config(config_name) def save_all(self): for _name in self._files: self.save(_name) @property", "dir, filter, default, **kwargs): self._files = {} files = self._get_files(files, dir, filter) for", "AttrDict(config)) def _save_config(self, name): config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\",", "files = self._get_files(files, dir, filter) for f in files: _file = FileFactory.create(f, default,", "config_name, config): setattr(self, config_name, AttrDict(config)) def _save_config(self, name): config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict) def", "def save(self, config_name): if config_name in self._files: self._save_config(config_name) def save_all(self): for _name in", "file_search from app_settings import FileFactory __all__ = [\"Config\"] class Config(object): def __init__(self, files=None,", "def _save_config(self, name): config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\", \"_\",", "self.save(_name) @property def files(self): return list(self._files.keys()) def __getitem__(self, key): 
return self._get_config(key) def _create_files(self,", "raise ValueError(\"No files or search directory provided.\") if files: if isinstance(files, collections.Iterable): for", "ValueError(\"No files or search directory provided.\") if files: if isinstance(files, collections.Iterable): for f", "def _get_files(self, files, dir, filter): if isinstance(files, str): return [files] if isinstance(files, collections.Iterable):", "dir=None, default=None, filter=None, **kwargs): self._validate(files, dir, default) self._create_files(files, dir, filter, default, **kwargs) self._load_files()", "self._transform_invalid_name(_file.name) self._files[_name] = _file def _get_files(self, files, dir, filter): if isinstance(files, str): return", "config): setattr(self, config_name, AttrDict(config)) def _save_config(self, name): config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self,", "files or search directory provided.\") if files: if isinstance(files, collections.Iterable): for f in", "config_name) def _add_config(self, config_name, config): setattr(self, config_name, AttrDict(config)) def _save_config(self, name): config_dict =", "isinstance(files, str): return [files] if isinstance(files, collections.Iterable): return files if dir: return file_search(dir,", "dir: return file_search(dir, filter, recursive=True) return [] def _load_files(self): for _name, _file in", "= dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\", \"_\", filename) def _validate(self, files,", "self._create_files(files, dir, filter, default, **kwargs) self._load_files() def save(self, config_name): if config_name in self._files:", "config_name in self._files: self._save_config(config_name) def save_all(self): for _name in self._files: self.save(_name) @property def", "Config(object): def __init__(self, files=None, dir=None, default=None, filter=None, **kwargs): 
self._validate(files, dir, default) self._create_files(files, dir,", "_file = FileFactory.create(f, default, **kwargs) _name = self._transform_invalid_name(_file.name) self._files[_name] = _file def _get_files(self,", "def _create_files(self, files, dir, filter, default, **kwargs): self._files = {} files = self._get_files(files,", "self._files[_name] = _file def _get_files(self, files, dir, filter): if isinstance(files, str): return [files]", "class Config(object): def __init__(self, files=None, dir=None, default=None, filter=None, **kwargs): self._validate(files, dir, default) self._create_files(files,", "setattr(self, config_name, AttrDict(config)) def _save_config(self, name): config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self, filename):", "dir, filter) for f in files: _file = FileFactory.create(f, default, **kwargs) _name =", "if isinstance(files, collections.Iterable): return files if dir: return file_search(dir, filter, recursive=True) return []", "= self._get_files(files, dir, filter) for f in files: _file = FileFactory.create(f, default, **kwargs)", "str): return [files] if isinstance(files, collections.Iterable): return files if dir: return file_search(dir, filter,", "files, dir, filter): if isinstance(files, str): return [files] if isinstance(files, collections.Iterable): return files", "self._get_config(key) def _create_files(self, files, dir, filter, default, **kwargs): self._files = {} files =", "save(self, config_name): if config_name in self._files: self._save_config(config_name) def save_all(self): for _name in self._files:", "**kwargs) self._load_files() def save(self, config_name): if config_name in self._files: self._save_config(config_name) def save_all(self): for", "dir, default) self._create_files(files, dir, filter, default, **kwargs) self._load_files() def save(self, config_name): if config_name", "re.sub(r\"[^A-Za-z]\", \"_\", filename) def _validate(self, files, dir, 
resolve_type): if not files and not", "self._get_files(files, dir, filter) for f in files: _file = FileFactory.create(f, default, **kwargs) _name", "re, collections from attrdict import AttrDict from app_settings import file_search from app_settings import", "files(self): return list(self._files.keys()) def __getitem__(self, key): return self._get_config(key) def _create_files(self, files, dir, filter,", "def _get_config(self, config_name): return getattr(self, config_name) def _add_config(self, config_name, config): setattr(self, config_name, AttrDict(config))", "resolve_type): if not files and not dir: raise ValueError(\"No files or search directory", "in files: assert isinstance(f, str) else: assert isinstance(files, str) if dir: assert isinstance(dir,", "_get_config(self, config_name): return getattr(self, config_name) def _add_config(self, config_name, config): setattr(self, config_name, AttrDict(config)) def", "or search directory provided.\") if files: if isinstance(files, collections.Iterable): for f in files:", "self._files = {} files = self._get_files(files, dir, filter) for f in files: _file", "dir, filter, default, **kwargs) self._load_files() def save(self, config_name): if config_name in self._files: self._save_config(config_name)", "for f in files: assert isinstance(f, str) else: assert isinstance(files, str) if dir:", "config_name, AttrDict(config)) def _save_config(self, name): config_dict = dict(self._get_config(name)) self._files[name].flush(config_dict) def _transform_invalid_name(self, filename): return", "app_settings import FileFactory __all__ = [\"Config\"] class Config(object): def __init__(self, files=None, dir=None, default=None,", "_file def _get_files(self, files, dir, filter): if isinstance(files, str): return [files] if isinstance(files,", "_name, _file in self._files.items(): self._add_config(_name, _file.load()) def _get_config(self, config_name): return getattr(self, config_name) def", "self._files[name].flush(config_dict) def 
_transform_invalid_name(self, filename): return re.sub(r\"[^A-Za-z]\", \"_\", filename) def _validate(self, files, dir, resolve_type):", "import file_search from app_settings import FileFactory __all__ = [\"Config\"] class Config(object): def __init__(self,", "default, **kwargs) self._load_files() def save(self, config_name): if config_name in self._files: self._save_config(config_name) def save_all(self):", "files and not dir: raise ValueError(\"No files or search directory provided.\") if files:", "[\"Config\"] class Config(object): def __init__(self, files=None, dir=None, default=None, filter=None, **kwargs): self._validate(files, dir, default)", "= FileFactory.create(f, default, **kwargs) _name = self._transform_invalid_name(_file.name) self._files[_name] = _file def _get_files(self, files,", "dir, filter): if isinstance(files, str): return [files] if isinstance(files, collections.Iterable): return files if", "filter): if isinstance(files, str): return [files] if isinstance(files, collections.Iterable): return files if dir:", "collections.Iterable): return files if dir: return file_search(dir, filter, recursive=True) return [] def _load_files(self):", "default) self._create_files(files, dir, filter, default, **kwargs) self._load_files() def save(self, config_name): if config_name in", "not dir: raise ValueError(\"No files or search directory provided.\") if files: if isinstance(files,", "def files(self): return list(self._files.keys()) def __getitem__(self, key): return self._get_config(key) def _create_files(self, files, dir,", "FileFactory __all__ = [\"Config\"] class Config(object): def __init__(self, files=None, dir=None, default=None, filter=None, **kwargs):", "key): return self._get_config(key) def _create_files(self, files, dir, filter, default, **kwargs): self._files = {}", "= {} files = self._get_files(files, dir, filter) for f in files: _file =", "filter, default, **kwargs): self._files = {} files = self._get_files(files, dir, filter) for f", "_name 
in self._files: self.save(_name) @property def files(self): return list(self._files.keys()) def __getitem__(self, key): return", "in self._files.items(): self._add_config(_name, _file.load()) def _get_config(self, config_name): return getattr(self, config_name) def _add_config(self, config_name,", "file_search(dir, filter, recursive=True) return [] def _load_files(self): for _name, _file in self._files.items(): self._add_config(_name,", "dir, resolve_type): if not files and not dir: raise ValueError(\"No files or search", "self._files.items(): self._add_config(_name, _file.load()) def _get_config(self, config_name): return getattr(self, config_name) def _add_config(self, config_name, config):", "for f in files: _file = FileFactory.create(f, default, **kwargs) _name = self._transform_invalid_name(_file.name) self._files[_name]", "files: if isinstance(files, collections.Iterable): for f in files: assert isinstance(f, str) else: assert", "filter, recursive=True) return [] def _load_files(self): for _name, _file in self._files.items(): self._add_config(_name, _file.load())", "return getattr(self, config_name) def _add_config(self, config_name, config): setattr(self, config_name, AttrDict(config)) def _save_config(self, name):", "def __getitem__(self, key): return self._get_config(key) def _create_files(self, files, dir, filter, default, **kwargs): self._files", "{} files = self._get_files(files, dir, filter) for f in files: _file = FileFactory.create(f,", "_file in self._files.items(): self._add_config(_name, _file.load()) def _get_config(self, config_name): return getattr(self, config_name) def _add_config(self,", "if isinstance(files, collections.Iterable): for f in files: assert isinstance(f, str) else: assert isinstance(files,", "[files] if isinstance(files, collections.Iterable): return files if dir: return file_search(dir, filter, recursive=True) return", "for _name in self._files: self.save(_name) @property def files(self): return list(self._files.keys()) def 
__getitem__(self, key):" ]
[ "def load(): \"\"\"Load all processor plugins that are enabled. :returns: priority sorted processor", "all processor plugins that are enabled. :returns: priority sorted processor plugins (high to", "processor plugins that are enabled. :returns: priority sorted processor plugins (high to low)", "that are enabled. :returns: priority sorted processor plugins (high to low) :rtype: list", "tvrenamer.processors import base def load(): \"\"\"Load all processor plugins that are enabled. :returns:", "load(): \"\"\"Load all processor plugins that are enabled. :returns: priority sorted processor plugins", "from tvrenamer.processors import base def load(): \"\"\"Load all processor plugins that are enabled.", "\"\"\"Load all processor plugins that are enabled. :returns: priority sorted processor plugins (high", "plugins that are enabled. :returns: priority sorted processor plugins (high to low) :rtype:", "are enabled. :returns: priority sorted processor plugins (high to low) :rtype: list \"\"\"", ":returns: priority sorted processor plugins (high to low) :rtype: list \"\"\" return base.EnabledExtensionManager()", "enabled. :returns: priority sorted processor plugins (high to low) :rtype: list \"\"\" return", "plugins\"\"\" from tvrenamer.processors import base def load(): \"\"\"Load all processor plugins that are", "import base def load(): \"\"\"Load all processor plugins that are enabled. :returns: priority", "<reponame>shad7/tvrenamer \"\"\"Result processors plugins\"\"\" from tvrenamer.processors import base def load(): \"\"\"Load all processor", "base def load(): \"\"\"Load all processor plugins that are enabled. :returns: priority sorted", "\"\"\"Result processors plugins\"\"\" from tvrenamer.processors import base def load(): \"\"\"Load all processor plugins", "processors plugins\"\"\" from tvrenamer.processors import base def load(): \"\"\"Load all processor plugins that" ]
[ "{\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\": 50, \"start_date\": \"2002-02-10T22:21:44Z\"} @pytest.fixture(name=\"authenticator\") def authenticator_fixture(config): return HTTPBasicAuth(username=config[\"api_key\"],", "pytest from requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture(): return {\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\",", "return {\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\": 50, \"start_date\": \"2002-02-10T22:21:44Z\"} @pytest.fixture(name=\"authenticator\") def authenticator_fixture(config): return", "config_fixture(): return {\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\": 50, \"start_date\": \"2002-02-10T22:21:44Z\"} @pytest.fixture(name=\"authenticator\") def authenticator_fixture(config):", "\"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\": 50, \"start_date\": \"2002-02-10T22:21:44Z\"} @pytest.fixture(name=\"authenticator\") def authenticator_fixture(config): return HTTPBasicAuth(username=config[\"api_key\"], password=\"<PASSWORD>\")", "import pytest from requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture(): return {\"domain\": \"test.freshdesk.com\", \"api_key\":", "# Copyright (c) 2022 Airbyte, Inc., all rights reserved. # import pytest from", "Airbyte, Inc., all rights reserved. # import pytest from requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\")", "from requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture(): return {\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\":", "rights reserved. # import pytest from requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture(): return", "# # Copyright (c) 2022 Airbyte, Inc., all rights reserved. 
# import pytest", "2022 Airbyte, Inc., all rights reserved. # import pytest from requests.auth import HTTPBasicAuth", "Inc., all rights reserved. # import pytest from requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\") def", "def config_fixture(): return {\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\": 50, \"start_date\": \"2002-02-10T22:21:44Z\"} @pytest.fixture(name=\"authenticator\") def", "Copyright (c) 2022 Airbyte, Inc., all rights reserved. # import pytest from requests.auth", "reserved. # import pytest from requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture(): return {\"domain\":", "import HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture(): return {\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\": 50, \"start_date\":", "requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture(): return {\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\": 50,", "(c) 2022 Airbyte, Inc., all rights reserved. # import pytest from requests.auth import", "HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture(): return {\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\": 50, \"start_date\": \"2002-02-10T22:21:44Z\"}", "@pytest.fixture(name=\"config\") def config_fixture(): return {\"domain\": \"test.freshdesk.com\", \"api_key\": \"secret_api_key\", \"requests_per_minute\": 50, \"start_date\": \"2002-02-10T22:21:44Z\"} @pytest.fixture(name=\"authenticator\")", "# import pytest from requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture(): return {\"domain\": \"test.freshdesk.com\",", "all rights reserved. # import pytest from requests.auth import HTTPBasicAuth @pytest.fixture(name=\"config\") def config_fixture():" ]
[ "tally = defaultdict(list) for idx,item in enumerate(seq): tally[item].append(idx) dups = [ (key,locs) for", "np.isnan(key) or key not in {-999, None, np.NaN} ] idxs = [] for", "self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[ self.startIdx:self.endIdx ] self.presRanges = self.presRanges[ self.startIdx:self.endIdx ] def main(self):", "deepRanges + abbysalRanges presRanges = [stringifyArray(x) for x in presRanges] return presRanges @staticmethod", "= dates try: sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas) except", "pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main() endTime = datetime.now() dt =", "iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet))) for", "the database. ''' if appLocal: baseURL = 'http://localhost:3000' else: baseURL = 'https://argovis.colorado.edu' baseURL", "= self.unique_idxs(x2) xu = [x2[idx] for idx in x_dup_idx] yu = [y2[idx] for", "@staticmethod def make_rg_pres_ranges(): ''' uses pressure ranges defined in RG climatology ''' rgFilename", "= outDf.rename({'_id': 'profile_id'}, axis=1) outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0) logging.debug('number of rows", "in df: {}'.format(outDf.shape[0])) logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf def intp_pres(self, xintp,", "return presRanges @staticmethod def save_iDF(iDf, filename, tdx): iDf.date = pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda", "None otherwise self.presLevels = [ 2.5, 10. , 20. , 30. 
, 40.", "n_rows = int(np.floor(365/period)) datesSet = [] for year in range(2007, 2019): yearSet =", "at least two points return None f = self.make_profile_interpolation_function(x, y) rowDict = profile.copy()", "xy = zip(x, y) ys = [y for _, y in sorted(xy)] xs", "lambda x: str(x).replace(' ', '') presRanges = [stringifyArray(x) for x in presRanges] return", "np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet = datesSet + yearSet keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'),", "1300. , 1350. , 1412.5, 1500. , 1600. , 1700. , 1800. ,", "when interpolating psal') logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation complete", "{}'.format(len(sliceProfiles))) try: iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp') except Exception as err: logging.warning('error", "small enough so as to not pass the 15 MB limit set by", "2.5 shallow: 10 to 182.5 dbar +- 5 medium: 200 to 462.5 dbar", "make_profile_interpolation_function(x,y): ''' creates interpolation function df is a dataframe containing columns xLab and", "for _, y in sorted(xy)] xs = sorted(x) return xs, ys @staticmethod def", "pressure level\", type=float, nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end on pressure level\", type=float, nargs='?', default=1975)", "logging.debug('number of dates:{}'.format(len(self.datesSet))) for tdx, dates in enumerate(self.datesSet): if tdx < self.starttdx: continue", ", 130. , 140. , 150. , 160. , 170. 
, 182.5, 200.", "xLab, yLab) if rowDict: outArray.append(rowDict) outDf = pd.DataFrame(outArray) outDf = outDf.rename({'_id': 'profile_id'}, axis=1)", "return None x, y = self.record_to_array(meas, xLab, yLab) x, y = self.format_xy(x, y)", "y @staticmethod def sort_list(x, y): '''sort x based off of y''' xy =", "to 1050 dbar +- 30 abbysal: 1100 to 1975 dbar +- 60 \"\"\"", "as err: pdb.set_trace() logging.warning('error when interpolating psal') logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf,", "%(name)s - %(levelname)s - %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of log file') startTime", "in self.exceptBasin: # ignores basins reject=True else: reject = False return reject @staticmethod", "self.unique_idxs(x2) xu = [x2[idx] for idx in x_dup_idx] yu = [y2[idx] for idx", "bnds = rg['PRESSURE_bnds'] presRanges = bnds.values.tolist() stringifyArray = lambda x: str(x).replace(' ', '')", "{1}'.format(xintp, tdx)) logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles))) try: iTempDf = self.make_interpolated_df(sliceProfiles,", ", 420. , 440. , 462.5, 500. , 550. , 600. , 650.", "None, np.NaN} ] except Exception as err: pdb.set_trace() print(err) xu = [xu[idx] for", "baseURL + startDateQuery + endDateQuery + presRangeQuery + intPresQuery if basin: basinQuery =", "error if not resp.status_code // 100 == 2: raise ValueError(\"Error: Unexpected response {}\".format(resp))", "try: iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal') except Exception as err: pdb.set_trace() logging.warning('error", "self.reduceMeas) except Exception as err: logging.warning('profiles not recieved: {}'.format(err)) continue logging.debug('xintp: {0} on", "presLevels[33:45] ] abbysalRanges = [ [x - 60, x + 60] for x", "enough so as to not pass the 15 MB limit set by the", "320. , 340. , 360. , 380. , 400. , 420. 
, 440.", "continue self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation complete at time index: {}'.format(tdx))", "15 MB limit set by the database. ''' if appLocal: baseURL = 'http://localhost:3000'", "@staticmethod def unique_idxs(seq): '''gets unique, non nan and non -999 indexes''' tally =", "help=\"start on pressure level\", type=float, nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end on pressure level\", type=float,", "= datesSet + yearSet keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds,", "from collections import OrderedDict, defaultdict class PchipOceanSlices(object): def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None,", "parser.add_argument(\"--maxl\", help=\"start on pressure level\", type=float, nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end on pressure level\",", "= iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not iDf.empty: with open(filename, 'a') as f:", "[] for meas in measurements: x.append(meas[xLab]) y.append(meas[yLab]) return x, y @staticmethod def sort_list(x,", "= lambda x: str(x).replace(' ', '') surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]]", "comprise of a string formatted to be: '[lowPres,highPres]' Try to make the query", "datesSet @staticmethod def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False): ''' query horizontal", "yu def make_interpolated_profile(self, profile, xintp, xLab, yLab): meas = profile['measurements'] if len(meas) ==", "dates try: sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas) except Exception", "iDf.date = pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not iDf.empty: with", "the query small enough so as to not pass the 
15 MB limit", "@staticmethod def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False): ''' query horizontal slice", "url = baseURL + startDateQuery + endDateQuery + presRangeQuery + intPresQuery if basin:", "save_iDF(iDf, filename, tdx): iDf.date = pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if", "self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas) except Exception as err: logging.warning('profiles not", "raise Exception return f @staticmethod def make_pres_ranges(presLevels): \"\"\" Pressure ranges are based off", "+= basinQuery url += '&reduceMeas=' + str(reduceMeas).lower() resp = requests.get(url) # Consider any", "xintp, 'pres', 'psal') except Exception as err: pdb.set_trace() logging.warning('error when interpolating psal') logging.warning(err)", "depths catagory surface: at 2.5 dbar +- 2.5 shallow: 10 to 182.5 dbar", "self.qcKeep: reject = True elif not profile['date_qc'] in self.qcKeep: reject = True elif", "# cannot be interpolated reject = True elif profile['BASIN'] in self.exceptBasin: # ignores", "nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end on pressure level\", type=float, nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter this", "to 1975 dbar +- 60 \"\"\" stringifyArray = lambda x: str(x).replace(' ', '')", "'%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of log", "yLab) if rowDict: outArray.append(rowDict) outDf = pd.DataFrame(outArray) outDf = outDf.rename({'_id': 'profile_id'}, axis=1) outDf", "\"\"\" n_rows = int(np.floor(365/period)) datesSet = [] for year in range(2007, 2019): yearSet", "other than 2xx an error if not resp.status_code // 100 == 2: raise", "default=1975) parser.add_argument(\"--basin\", help=\"filter this basin\", type=str, 
nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start time index\", type=int,", "resp.status_code // 100 == 2: raise ValueError(\"Error: Unexpected response {}\".format(resp)) profiles = resp.json()", "[] y = [] for meas in measurements: x.append(meas[xLab]) y.append(meas[yLab]) return x, y", "cannot be interpolated reject = True elif profile['BASIN'] in self.exceptBasin: # ignores basins", "profile['measurements'] if len(meas) == 0: return None if not yLab in meas[0].keys(): return", "+ ':' + str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s - %(name)s -", "self.intp_pres(xintp, presRange) if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on", "tdx) logging.debug('interpolation complete at time index: {}'.format(tdx)) timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt", "rowDict['measurements'] rowDict[xLab] = xintp if len(meas) == 1 and meas[xLab][0] == xintp: yintp", "[] for dup in sorted(dups): idxs.append(dup[1][0]) return idxs def format_xy(self, x, y): '''prep", "90. , 100. , 110. , 120. , 130. , 140. 
, 150.", "'') surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]] shallowRanges = [ [x -", "not resp.status_code // 100 == 2: raise ValueError(\"Error: Unexpected response {}\".format(resp)) profiles =", "iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp') except Exception as err: logging.warning('error when interpolating", "@staticmethod def make_pres_ranges(presLevels): \"\"\" Pressure ranges are based off of depths catagory surface:", "make_interpolated_profile(self, profile, xintp, xLab, yLab): meas = profile['measurements'] if len(meas) == 0: return", "pchip needs at least two points return None f = self.make_profile_interpolation_function(x, y) rowDict", "be interpolated xintp: the values to be interpolated ''' outArray = [] for", "self.basin = basin # indian ocean only Set to None otherwise self.presLevels =", "self.basin, self.appLocal, self.reduceMeas) except Exception as err: logging.warning('profiles not recieved: {}'.format(err)) continue logging.debug('xintp:", "5] for x in presLevels[1:19] ] mediumRanges = [ [x - 15, x", "dbar +- 30 abbysal: 1100 to 1975 dbar +- 60 \"\"\" stringifyArray =", "myArgs.maxl] basin = myArgs.basin starttdx = myArgs.starttdx #idxStr = str(myArgs.minl) + ':' +", "default='pchipOceanSlices.log') myArgs = parser.parse_args() pLevelRange = [myArgs.minl, myArgs.maxl] basin = myArgs.basin starttdx =", "iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start =", "parser.add_argument(\"--basin\", help=\"filter this basin\", type=str, nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start time index\", type=int, nargs='?',", "1 and meas[xLab][0] == xintp: yintp = meas[yLab][0] else: yintp = f(xintp) rowDict[yLab]", "pdb import requests import numpy as np import os, sys import xarray as", "+ endDate presRangeQuery = '&presRange=' + presRange 
intPresQuery = '&intPres=' + str(intPres) url", "= '&intPres=' + str(intPres) url = baseURL + startDateQuery + endDateQuery + presRangeQuery", "presRange should comprise of a string formatted to be: '[lowPres,highPres]' Try to make", "'&intPres=' + str(intPres) url = baseURL + startDateQuery + endDateQuery + presRangeQuery +", "def record_to_array(measurements, xLab, yLab): x = [] y = [] for meas in", "60, x + 60] for x in presLevels[45:] ] presRanges = surfaceRange +", ", 550. , 600. , 650. , 700. , 750. , 800. ,", "import logging from scipy.interpolate import PchipInterpolator import argparse from collections import OrderedDict, defaultdict", "with open(filename, 'a') as f: if tdx==0: iDf.to_csv(f, header=True) else: iDf.to_csv(f, header=False) @staticmethod", "- %(levelname)s - %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of log file') startTime =", "presRange, intPres, basin=None, appLocal=None, reduceMeas=False): ''' query horizontal slice of ocean for a", "< 2: # pchip needs at least two points return None f =", "2000.] self.pLevelRange = pLevelRange self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30): \"\"\" create", "of dates split into n periods. period is in days. 
\"\"\" n_rows =", "60] for x in presLevels[45:] ] presRanges = surfaceRange + shallowRanges + mediumRanges", "= f(xintp) rowDict[yLab] = yintp return rowDict def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'):", "temp') logging.warning(err) continue try: iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal') except Exception as", "def main(self): logging.debug('inside main loop') logging.debug('running pressure level ranges: {}'.format(self.pLevelRange)) for idx, presLevel", "True elif profile['BASIN'] in self.exceptBasin: # ignores basins reject=True else: reject = False", "try: f = PchipInterpolator(x, y, axis=1, extrapolate=False) except Exception as err: pdb.set_trace() logging.warning(err)", "def format_xy(self, x, y): '''prep for interpolation''' x2, y2 = self.sort_list(x, y) try:", "points from db query self.qcKeep = set([1,2]) # used to filter bad positions", "y, axis=1, extrapolate=False) except Exception as err: pdb.set_trace() logging.warning(err) raise Exception return f", "= self.sort_list(x, y) try: x_dup_idx = self.unique_idxs(x2) xu = [x2[idx] for idx in", "reject = True elif len(profile['measurements']) < 2: # cannot be interpolated reject =", "self.exceptBasin: # ignores basins reject=True else: reject = False return reject @staticmethod def", "except Exception as err: logging.warning('profiles not recieved: {}'.format(err)) continue logging.debug('xintp: {0} on tdx:", "] abbysalRanges = [ [x - 60, x + 60] for x in", "+- 60 \"\"\" stringifyArray = lambda x: str(x).replace(' ', '') surfaceRange = [[presLevels[0]", "x + 30] for x in presLevels[33:45] ] abbysalRanges = [ [x -", "500 to 1050 dbar +- 30 abbysal: 1100 to 1975 dbar +- 60", "key, locs in dups if not np.isnan(key) or key not in {-999, None,", "'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet))) for tdx, dates in enumerate(self.datesSet): if", "[ 2.5, 10. , 20. , 30. , 40. , 50. 
, 60.", "logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt = timeTick-start logging.debug('completed run for psal {0} running for: {1}'.format(xintp,", "for x in presLevels[19:33] ] deepRanges = [ [x - 30, x +", "return x, y @staticmethod def sort_list(x, y): '''sort x based off of y'''", "rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename, decode_times=False) bnds = rg['PRESSURE_bnds'] presRanges = bnds.values.tolist()", "basin = myArgs.basin starttdx = myArgs.starttdx #idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl)", "for idx in x_dup_idx] yu = [y2[idx] for idx in x_dup_idx] # remove", "+- 2.5 shallow: 10 to 182.5 dbar +- 5 medium: 200 to 462.5", "pdb.set_trace() logging.warning('error when interpolating psal') logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx)", "200 to 462.5 dbar +- 15 deep: 500 to 1050 dbar +- 30", "self.exceptBasin = exceptBasin self.starttdx = starttdx self.reduceMeas = False #removes excess points from", "and none y_nan_idx =[idx for idx,key in enumerate(yu) if not key in {-999,", "not pass the 15 MB limit set by the database. 
''' if appLocal:", "class PchipOceanSlices(object): def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal = appLocal self.datesSet", "make_pres_ranges(presLevels): \"\"\" Pressure ranges are based off of depths catagory surface: at 2.5", "y in sorted(xy)] xs = sorted(x) return xs, ys @staticmethod def unique_idxs(seq): '''gets", "+ presRangeQuery + intPresQuery if basin: basinQuery = '&basin=' + basin url +=", "for the interpolation input x yLab: the column to be interpolated xintp: the", "deepRanges = [ [x - 30, x + 30] for x in presLevels[33:45]", "enumerate(seq): tally[item].append(idx) dups = [ (key,locs) for key,locs in tally.items() ] dups =", "+ deepRanges + abbysalRanges presRanges = [stringifyArray(x) for x in presRanges] return presRanges", "collections import OrderedDict, defaultdict class PchipOceanSlices(object): def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False):", "xs = sorted(x) return xs, ys @staticmethod def unique_idxs(seq): '''gets unique, non nan", "else: reject = False return reject @staticmethod def make_profile_interpolation_function(x,y): ''' creates interpolation function", "to be interpolated ''' outArray = [] for profile in profiles: rowDict =", "presLevels[0]+ 2.5]] shallowRanges = [ [x - 5, x + 5] for x", "presRanges = [stringifyArray(x) for x in presRanges] return presRanges @staticmethod def make_rg_pres_ranges(): '''", "= [] for dup in sorted(dups): idxs.append(dup[1][0]) return idxs def format_xy(self, x, y):", "surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]] shallowRanges = [ [x - 5,", "1900. , 1975., 2000.] 
self.pLevelRange = pLevelRange self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def", "import PchipInterpolator import argparse from collections import OrderedDict, defaultdict class PchipOceanSlices(object): def __init__(self,", "yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet = datesSet + yearSet keepEnds = lambda", "[x - 5, x + 5] for x in presLevels[1:19] ] mediumRanges =", "else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet)))", "for psal {0} running for: {1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self): ''' reduces presLevels and", "str(myArgs.minl) + ':' + str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s - %(name)s", "+ intPresQuery if basin: basinQuery = '&basin=' + basin url += basinQuery url", "indexes''' tally = defaultdict(list) for idx,item in enumerate(seq): tally[item].append(idx) dups = [ (key,locs)", "rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab) if rowDict: outArray.append(rowDict) outDf = pd.DataFrame(outArray) outDf", "= bnds.values.tolist() stringifyArray = lambda x: str(x).replace(' ', '') presRanges = [stringifyArray(x) for", "def intp_pres(self, xintp, presRange): if self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp,", "pressure ranges defined in RG climatology ''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename,", "yearSet keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds, datesSet)) return datesSet", "not in {-999, None, np.NaN} ] idxs = [] for dup in sorted(dups):", "so as to not pass the 15 MB limit set 
by the database.", "reject @staticmethod def make_profile_interpolation_function(x,y): ''' creates interpolation function df is a dataframe containing", "= yintp return rowDict def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'): ''' make a", "parser.add_argument(\"--starttdx\", help=\"start time index\", type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name of log file\", type=str,", "= meas[yLab][0] else: yintp = f(xintp) rowDict[yLab] = yintp return rowDict def make_interpolated_df(self,", "like so: 'YYYY-MM-DD' presRange should comprise of a string formatted to be: '[lowPres,highPres]'", "dups if not np.isnan(key) or key not in {-999, None, np.NaN} ] idxs", "datesSet = [] for year in range(2007, 2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows)", "query horizontal slice of ocean for a specified time range startDate and endDate", "in enumerate(yu) if not key in {-999, None, np.NaN} ] except Exception as", "help=\"filter this basin\", type=str, nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start time index\", type=int, nargs='?', default=0)", "= defaultdict(list) for idx,item in enumerate(seq): tally[item].append(idx) dups = [ (key,locs) for key,locs", "if not yLab in meas[0].keys(): return None x, y = self.record_to_array(meas, xLab, yLab)", "make_rg_pres_ranges(): ''' uses pressure ranges defined in RG climatology ''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc'", "] def main(self): logging.debug('inside main loop') logging.debug('running pressure level ranges: {}'.format(self.pLevelRange)) for idx,", "{}'.format(err)) continue logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx)) logging.debug('number of profiles found in", "otherwise self.presLevels = [ 2.5, 10. , 20. , 30. , 40. 
,", "= True elif not profile['date_qc'] in self.qcKeep: reject = True elif len(profile['measurements']) <", "= True elif len(profile['measurements']) < 2: # cannot be interpolated reject = True", "{}'.format(outDf.shape[0])) logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf def intp_pres(self, xintp, presRange): if", "outDf.rename({'_id': 'profile_id'}, axis=1) outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0) logging.debug('number of rows in", ", 1100. , 1150. , 1200. , 1250. , 1300. , 1350. ,", "for: {1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self): ''' reduces presLevels and pres ranges to those", "None x, y = self.record_to_array(meas, xLab, yLab) x, y = self.format_xy(x, y) if", "interpolated reject = True elif profile['BASIN'] in self.exceptBasin: # ignores basins reject=True else:", "20. , 30. , 40. , 50. , 60. , 70. , 80.", "- 2.5, presLevels[0]+ 2.5]] shallowRanges = [ [x - 5, x + 5]", "type=str, nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start time index\", type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name of", "340. , 360. , 380. , 400. , 420. , 440. 
, 462.5,", "iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not iDf.empty: with open(filename, 'a') as f: if", "if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on pressure level\",", "OrderedDict, defaultdict class PchipOceanSlices(object): def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal =", "open(filename, 'a') as f: if tdx==0: iDf.to_csv(f, header=True) else: iDf.to_csv(f, header=False) @staticmethod def", "tally.items() ] dups = [ (key, locs) for key, locs in dups if", "string formatted to be: '[lowPres,highPres]' Try to make the query small enough so", "bnds.values.tolist() stringifyArray = lambda x: str(x).replace(' ', '') presRanges = [stringifyArray(x) for x", ", 220. , 240. , 260. , 280. , 300. , 320. ,", ", 1600. , 1700. , 1800. , 1900. , 1975., 2000.] self.pLevelRange =", "to None otherwise self.presLevels = [ 2.5, 10. , 20. , 30. ,", "idx in x_dup_idx] # remove none -999 and none y_nan_idx =[idx for idx,key", "dups = [ (key, locs) for key, locs in dups if not np.isnan(key)", "550. , 600. , 650. , 700. , 750. , 800. , 850.", "= [y2[idx] for idx in x_dup_idx] # remove none -999 and none y_nan_idx", "Set to None otherwise self.presLevels = [ 2.5, 10. , 20. 
, 30.", "def reduce_presLevels_and_presRanges(self): ''' reduces presLevels and pres ranges to those specified in pLevelRange", "self.sort_list(x, y) try: x_dup_idx = self.unique_idxs(x2) xu = [x2[idx] for idx in x_dup_idx]", "''' uses pressure ranges defined in RG climatology ''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg", "profile xLab: the column name for the interpolation input x yLab: the column", "= surfaceRange + shallowRanges + mediumRanges + deepRanges + abbysalRanges presRanges = [stringifyArray(x)", "of rows in df: {}'.format(outDf.shape[0])) logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf def", "idx in x_dup_idx] yu = [y2[idx] for idx in x_dup_idx] # remove none", "psal {0} running for: {1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self): ''' reduces presLevels and pres", "= baseURL + startDateQuery + endDateQuery + presRangeQuery + intPresQuery if basin: basinQuery", "interpolating psal') logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation complete at", "sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas) except Exception as err:", "y_nan_idx =[idx for idx,key in enumerate(yu) if not key in {-999, None, np.NaN}", ", 1800. , 1900. , 1975., 2000.] self.pLevelRange = pLevelRange self.presRanges = self.make_rg_pres_ranges()", "== 0: return None if not yLab in meas[0].keys(): return None x, y", "x + 15] for x in presLevels[19:33] ] deepRanges = [ [x -", ", 1350. , 1412.5, 1500. , 1600. , 1700. , 1800. , 1900.", "380. , 400. , 420. , 440. , 462.5, 500. , 550. 
,", "logging.debug('Start of log file') startTime = datetime.now() pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx,", "idx,key in enumerate(yu) if not key in {-999, None, np.NaN} ] except Exception", "as err: logging.warning('error when interpolating temp') logging.warning(err) continue try: iPsalDf = self.make_interpolated_df(sliceProfiles, xintp,", "as err: logging.warning('profiles not recieved: {}'.format(err)) continue logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx))", "'[lowPres,highPres]' Try to make the query small enough so as to not pass", "= [ [x - 30, x + 30] for x in presLevels[33:45] ]", "sorted(xy)] xs = sorted(x) return xs, ys @staticmethod def unique_idxs(seq): '''gets unique, non", "return f @staticmethod def make_pres_ranges(presLevels): \"\"\" Pressure ranges are based off of depths", "ys @staticmethod def unique_idxs(seq): '''gets unique, non nan and non -999 indexes''' tally", "input x yLab: the column to be interpolated xintp: the values to be", "not yLab in meas[0].keys(): return None x, y = self.record_to_array(meas, xLab, yLab) x,", "tdx < self.starttdx: continue logging.debug('starting interpolation at time index: {}'.format(tdx)) startDate, endDate =", "datesSet + yearSet keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds, datesSet))", "tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation complete at time index: {}'.format(tdx)) timeTick = datetime.now()", "60. , 70. , 80. , 90. , 100. , 110. , 120.", "1350. , 1412.5, 1500. , 1600. , 1700. , 1800. , 1900. 
,", "at 2.5 dbar +- 2.5 shallow: 10 to 182.5 dbar +- 5 medium:", "yLab='temp'): ''' make a dataframe of interpolated values set at xintp for each", "else: iDf.to_csv(f, header=False) @staticmethod def record_to_array(measurements, xLab, yLab): x = [] y =", "2: # pchip needs at least two points return None f = self.make_profile_interpolation_function(x,", "xintp, 'pres', 'temp') except Exception as err: logging.warning('error when interpolating temp') logging.warning(err) continue", "xintp = presLevel presRange = self.presRanges[idx] self.intp_pres(xintp, presRange) if __name__ == '__main__': parser", "of a string formatted to be: '[lowPres,highPres]' Try to make the query small", "10. , 20. , 30. , 40. , 50. , 60. , 70.", "set at xintp for each profile xLab: the column name for the interpolation", "as f: if tdx==0: iDf.to_csv(f, header=True) else: iDf.to_csv(f, header=False) @staticmethod def record_to_array(measurements, xLab,", "logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf def intp_pres(self, xintp, presRange): if self.basin:", "main loop') logging.debug('running pressure level ranges: {}'.format(self.pLevelRange)) for idx, presLevel in enumerate(self.presLevels): xintp", ", 260. , 280. , 300. , 320. , 340. , 360. ,", "presRange): if self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName", "non -999 indexes''' tally = defaultdict(list) for idx,item in enumerate(seq): tally[item].append(idx) dups =", "'&endDate=' + endDate presRangeQuery = '&presRange=' + presRange intPresQuery = '&intPres=' + str(intPres)", "x in presRanges] return presRanges @staticmethod def make_rg_pres_ranges(): ''' uses pressure ranges defined", "2.5, 10. , 20. , 30. , 40. , 50. , 60. 
,", "in dups if not np.isnan(key) or key not in {-999, None, np.NaN} ]", "x_dup_idx] yu = [y2[idx] for idx in x_dup_idx] # remove none -999 and", "sort_list(x, y): '''sort x based off of y''' xy = zip(x, y) ys", "self.presLevels[ self.startIdx:self.endIdx ] self.presRanges = self.presRanges[ self.startIdx:self.endIdx ] def main(self): logging.debug('inside main loop')", "= [] y = [] for meas in measurements: x.append(meas[xLab]) y.append(meas[yLab]) return x,", "@staticmethod def get_dates_set(period=30): \"\"\" create a set of dates split into n periods.", "from scipy.interpolate import PchipInterpolator import argparse from collections import OrderedDict, defaultdict class PchipOceanSlices(object):", "+ abbysalRanges presRanges = [stringifyArray(x) for x in presRanges] return presRanges @staticmethod def", "return idxs def format_xy(self, x, y): '''prep for interpolation''' x2, y2 = self.sort_list(x,", "ignores basins reject=True else: reject = False return reject @staticmethod def make_profile_interpolation_function(x,y): '''", "n periods. period is in days. 
\"\"\" n_rows = int(np.floor(365/period)) datesSet = []", "timeTick-start logging.debug('completed run for psal {0} running for: {1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self): '''", "range startDate and endDate should be a string formated like so: 'YYYY-MM-DD' presRange", "= pLevelRange self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30): \"\"\" create a set", "__name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on pressure level\", type=float,", "+ endDateQuery + presRangeQuery + intPresQuery if basin: basinQuery = '&basin=' + basin", "presLevels[45:] ] presRanges = surfaceRange + shallowRanges + mediumRanges + deepRanges + abbysalRanges", "return rowDict def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'): ''' make a dataframe of", "1412.5, 1500. , 1600. , 1700. , 1800. , 1900. 
, 1975., 2000.]", "f @staticmethod def make_pres_ranges(presLevels): \"\"\" Pressure ranges are based off of depths catagory", "import pdb import requests import numpy as np import os, sys import xarray", "level\", type=float, nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter this basin\", type=str, nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start", "y.append(meas[yLab]) return x, y @staticmethod def sort_list(x, y): '''sort x based off of", "xLab and yLab ''' try: f = PchipInterpolator(x, y, axis=1, extrapolate=False) except Exception", "dbar +- 5 medium: 200 to 462.5 dbar +- 15 deep: 500 to", "return datesSet @staticmethod def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False): ''' query", "lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds, datesSet)) return datesSet @staticmethod def get_ocean_slice(startDate,", "for key, locs in dups if not np.isnan(key) or key not in {-999,", "starttdx self.reduceMeas = False #removes excess points from db query self.qcKeep = set([1,2])", "raise ValueError(\"Error: Unexpected response {}\".format(resp)) profiles = resp.json() return profiles def reject_profile(self, profile):", "appLocal self.datesSet = self.get_dates_set() self.exceptBasin = exceptBasin self.starttdx = starttdx self.reduceMeas = False", "db query self.qcKeep = set([1,2]) # used to filter bad positions and dates", "in meas[0].keys(): return None x, y = self.record_to_array(meas, xLab, yLab) x, y =", "axis=1) outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0) logging.debug('number of rows in df: {}'.format(outDf.shape[0]))", "-999 and none y_nan_idx =[idx for idx,key in enumerate(yu) if not key in", "basin=None, exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal = appLocal self.datesSet = self.get_dates_set() self.exceptBasin = exceptBasin", "get_dates_set(period=30): \"\"\" create a 
set of dates split into n periods. period is", "xLab, yLab): meas = profile['measurements'] if len(meas) == 0: return None if not", "query small enough so as to not pass the 15 MB limit set", "level=logging.DEBUG) logging.debug('Start of log file') startTime = datetime.now() pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={},", "type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name of log file\", type=str, nargs='?', default='pchipOceanSlices.log') myArgs =", "= self.make_profile_interpolation_function(x, y) rowDict = profile.copy() del rowDict['measurements'] rowDict[xLab] = xintp if len(meas)", "1600. , 1700. , 1800. , 1900. , 1975., 2000.] self.pLevelRange = pLevelRange", "{}'.format(tdx)) timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt = timeTick-start logging.debug('completed run for psal", "of profiles interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf def intp_pres(self, xintp, presRange): if self.basin: iTempFileName", "10 to 182.5 dbar +- 5 medium: 200 to 462.5 dbar +- 15", "'YYYY-MM-DD' presRange should comprise of a string formatted to be: '[lowPres,highPres]' Try to", "days. \"\"\" n_rows = int(np.floor(365/period)) datesSet = [] for year in range(2007, 2019):", "default=None) parser.add_argument(\"--starttdx\", help=\"start time index\", type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name of log file\",", "defaultdict(list) for idx,item in enumerate(seq): tally[item].append(idx) dups = [ (key,locs) for key,locs in", "[stringifyArray(x) for x in presRanges] return presRanges @staticmethod def make_rg_pres_ranges(): ''' uses pressure", "file for pressure level ranges: {}'.format(pLevelRange)) dtStr = 'time to complete: {} seconds'.format(dt.seconds)", "1150. , 1200. , 1250. , 1300. , 1350. , 1412.5, 1500. 
,", "in enumerate(self.presLevels): xintp = presLevel presRange = self.presRanges[idx] self.intp_pres(xintp, presRange) if __name__ ==", "220. , 240. , 260. , 280. , 300. , 320. , 340.", "== 2: raise ValueError(\"Error: Unexpected response {}\".format(resp)) profiles = resp.json() return profiles def", ", 40. , 50. , 60. , 70. , 80. , 90. ,", "time index: {}'.format(tdx)) timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt = timeTick-start logging.debug('completed run", "2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet = datesSet + yearSet keepEnds =", "should be a string formated like so: 'YYYY-MM-DD' presRange should comprise of a", "exceptBasin self.starttdx = starttdx self.reduceMeas = False #removes excess points from db query", "''' outArray = [] for profile in profiles: rowDict = self.make_interpolated_profile(profile, xintp, xLab,", "self.pLevelRange = pLevelRange self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30): \"\"\" create a", "420. , 440. , 462.5, 500. , 550. , 600. , 650. ,", "self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp)", "1050. , 1100. , 1150. , 1200. , 1250. , 1300. 
, 1350.", "a dataframe containing columns xLab and yLab ''' try: f = PchipInterpolator(x, y,", "= [] for profile in profiles: rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab) if", "of profiles found in interval: {}'.format(len(sliceProfiles))) try: iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp')", "[yu[idx] for idx in y_nan_idx] return xu, yu def make_interpolated_profile(self, profile, xintp, xLab,", "= pd.DataFrame(outArray) outDf = outDf.rename({'_id': 'profile_id'}, axis=1) outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0)", "- %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of log file') startTime = datetime.now() pos", "= list(map(keepEnds, datesSet)) return datesSet @staticmethod def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None,", "yu = [y2[idx] for idx in x_dup_idx] # remove none -999 and none", "str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'", "110. , 120. , 130. , 140. , 150. , 160. 
, 170.", "in range(2007, 2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet = datesSet + yearSet", "defaultdict class PchipOceanSlices(object): def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal = appLocal", "#logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(format=FORMAT,", "int(np.floor(365/period)) datesSet = [] for year in range(2007, 2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'),", "self.format_xy(x, y) if len(x) < 2: # pchip needs at least two points", "formatted to be: '[lowPres,highPres]' Try to make the query small enough so as", "presRangeQuery = '&presRange=' + presRange intPresQuery = '&intPres=' + str(intPres) url = baseURL", "iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName", "y2 = self.sort_list(x, y) try: x_dup_idx = self.unique_idxs(x2) xu = [x2[idx] for idx", "logging.debug('completed run for psal {0} running for: {1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self): ''' reduces", "x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds, datesSet)) return datesSet @staticmethod def get_ocean_slice(startDate, endDate,", "starttdx=None, appLocal=False): self.appLocal = appLocal self.datesSet = self.get_dates_set() self.exceptBasin = exceptBasin self.starttdx =", "30, x + 30] for x in presLevels[33:45] ] abbysalRanges = [ [x", "':' + str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s - %(name)s - %(levelname)s", "50. , 60. , 70. , 80. , 90. , 100. 
, 110.", "np import os, sys import xarray as xr from datetime import datetime, timedelta", "requests.get(url) # Consider any status other than 2xx an error if not resp.status_code", "parser.parse_args() pLevelRange = [myArgs.minl, myArgs.maxl] basin = myArgs.basin starttdx = myArgs.starttdx #idxStr =", "''' query horizontal slice of ocean for a specified time range startDate and", "'/gridding/presSliceForInterpolation/' startDateQuery = '?startDate=' + startDate endDateQuery = '&endDate=' + endDate presRangeQuery =", "= starttdx self.reduceMeas = False #removes excess points from db query self.qcKeep =", "= self.get_dates_set() self.exceptBasin = exceptBasin self.starttdx = starttdx self.reduceMeas = False #removes excess", "x, y = self.format_xy(x, y) if len(x) < 2: # pchip needs at", "tdx, dates in enumerate(self.datesSet): if tdx < self.starttdx: continue logging.debug('starting interpolation at time", "f: if tdx==0: iDf.to_csv(f, header=True) else: iDf.to_csv(f, header=False) @staticmethod def record_to_array(measurements, xLab, yLab):", "= str(myArgs.minl) + ':' + str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s -", "xLab: the column name for the interpolation input x yLab: the column to", "url += basinQuery url += '&reduceMeas=' + str(reduceMeas).lower() resp = requests.get(url) # Consider", "xu = [x2[idx] for idx in x_dup_idx] yu = [y2[idx] for idx in", "else: yintp = f(xintp) rowDict[yLab] = yintp return rowDict def make_interpolated_df(self, profiles, xintp,", "240. , 260. , 280. , 300. , 320. , 340. 
, 360.", "df: {}'.format(outDf.shape[0])) logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf def intp_pres(self, xintp, presRange):", "logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx)) logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles)))", "[x2[idx] for idx in x_dup_idx] yu = [y2[idx] for idx in x_dup_idx] #", "nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter this basin\", type=str, nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start time index\",", "for meas in measurements: x.append(meas[xLab]) y.append(meas[yLab]) return x, y @staticmethod def sort_list(x, y):", "y = self.record_to_array(meas, xLab, yLab) x, y = self.format_xy(x, y) if len(x) <", "at time index: {}'.format(tdx)) startDate, endDate = dates try: sliceProfiles = self.get_ocean_slice(startDate, endDate,", "2xx an error if not resp.status_code // 100 == 2: raise ValueError(\"Error: Unexpected", ", 60. , 70. , 80. , 90. , 100. , 110. ,", "set([1,2]) # used to filter bad positions and dates self.basin = basin #", "950. , 1000. , 1050. , 1100. , 1150. , 1200. , 1250.", "presRanges = surfaceRange + shallowRanges + mediumRanges + deepRanges + abbysalRanges presRanges =", "for dup in sorted(dups): idxs.append(dup[1][0]) return idxs def format_xy(self, x, y): '''prep for", "= '&basin=' + basin url += basinQuery url += '&reduceMeas=' + str(reduceMeas).lower() resp", "found in interval: {}'.format(len(sliceProfiles))) try: iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp') except Exception", "@staticmethod def record_to_array(measurements, xLab, yLab): x = [] y = [] for meas", "startDate, endDate = dates try: sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal,", "shallowRanges = [ [x - 5, x + 5] for x in presLevels[1:19]", "to not pass the 15 MB limit set by the database. 
''' if", "key,locs in tally.items() ] dups = [ (key, locs) for key, locs in", "the column to be interpolated xintp: the values to be interpolated ''' outArray", "basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main() endTime = datetime.now() dt = endTime - startTime", "1050 dbar +- 30 abbysal: 1100 to 1975 dbar +- 60 \"\"\" stringifyArray", "ranges defined in RG climatology ''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename, decode_times=False)", "specified in pLevelRange ''' self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[", "abbysal: 1100 to 1975 dbar +- 60 \"\"\" stringifyArray = lambda x: str(x).replace('", "outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0) logging.debug('number of rows in df: {}'.format(outDf.shape[0])) logging.debug('number", "pressure level ranges: {}'.format(pLevelRange)) dtStr = 'time to complete: {} seconds'.format(dt.seconds) print(dtStr) logging.debug(dtStr)", "create a set of dates split into n periods. period is in days.", "+ 60] for x in presLevels[45:] ] presRanges = surfaceRange + shallowRanges +", ", 90. , 100. , 110. , 120. , 130. , 140. ,", "{1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self): ''' reduces presLevels and pres ranges to those specified", "= [ [x - 60, x + 60] for x in presLevels[45:] ]", "and dates self.basin = basin # indian ocean only Set to None otherwise", ", 120. , 130. , 140. , 150. , 160. , 170. ,", ", 400. , 420. , 440. , 462.5, 500. , 550. 
, 600.", "so: 'YYYY-MM-DD' presRange should comprise of a string formatted to be: '[lowPres,highPres]' Try", "outDf = pd.DataFrame(outArray) outDf = outDf.rename({'_id': 'profile_id'}, axis=1) outDf = outDf.dropna(subset=[xLab, yLab], how='any',", "idxs.append(dup[1][0]) return idxs def format_xy(self, x, y): '''prep for interpolation''' x2, y2 =", "@staticmethod def make_profile_interpolation_function(x,y): ''' creates interpolation function df is a dataframe containing columns", "if tdx==0: iDf.to_csv(f, header=True) else: iDf.to_csv(f, header=False) @staticmethod def record_to_array(measurements, xLab, yLab): x", "in y_nan_idx] yu = [yu[idx] for idx in y_nan_idx] return xu, yu def", "30. , 40. , 50. , 60. , 70. , 80. , 90.", "yLab in meas[0].keys(): return None x, y = self.record_to_array(meas, xLab, yLab) x, y", "continue logging.debug('starting interpolation at time index: {}'.format(tdx)) startDate, endDate = dates try: sliceProfiles", "\"\"\" stringifyArray = lambda x: str(x).replace(' ', '') surfaceRange = [[presLevels[0] - 2.5,", ", 700. , 750. , 800. , 850. , 900. , 950. ,", "_, y in sorted(xy)] xs = sorted(x) return xs, ys @staticmethod def unique_idxs(seq):", "'pres', 'temp') except Exception as err: logging.warning('error when interpolating temp') logging.warning(err) continue try:", ", 800. , 850. , 900. , 950. , 1000. , 1050. ,", "120. , 130. , 140. , 150. , 160. , 170. , 182.5,", "each profile xLab: the column name for the interpolation input x yLab: the", "''' try: f = PchipInterpolator(x, y, axis=1, extrapolate=False) except Exception as err: pdb.set_trace()", ", 1000. , 1050. , 1100. , 1150. , 1200. , 1250. 
,", "default=0) parser.add_argument(\"--logFileName\", help=\"name of log file\", type=str, nargs='?', default='pchipOceanSlices.log') myArgs = parser.parse_args() pLevelRange", "column name for the interpolation input x yLab: the column to be interpolated", "pd import pdb import requests import numpy as np import os, sys import", "Exception return f @staticmethod def make_pres_ranges(presLevels): \"\"\" Pressure ranges are based off of", "x yLab: the column to be interpolated xintp: the values to be interpolated", "< self.starttdx: continue logging.debug('starting interpolation at time index: {}'.format(tdx)) startDate, endDate = dates", "ranges to those specified in pLevelRange ''' self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1])", "'http://localhost:3000' else: baseURL = 'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/' startDateQuery = '?startDate=' + startDate", "Exception as err: pdb.set_trace() print(err) xu = [xu[idx] for idx in y_nan_idx] yu", "a set of dates split into n periods. period is in days. \"\"\"", "= np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet = datesSet + yearSet keepEnds = lambda x:", "1800. , 1900. , 1975., 2000.] 
self.pLevelRange = pLevelRange self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges()", "parser.add_argument(\"--minl\", help=\"end on pressure level\", type=float, nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter this basin\", type=str,", "presRanges = [stringifyArray(x) for x in presRanges] return presRanges @staticmethod def save_iDF(iDf, filename,", "complete at time index: {}'.format(tdx)) timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt = timeTick-start", "presRange = self.presRanges[idx] self.intp_pres(xintp, presRange) if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)", "ranges: {}'.format(self.pLevelRange)) for idx, presLevel in enumerate(self.presLevels): xintp = presLevel presRange = self.presRanges[idx]", "''' self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[ self.startIdx:self.endIdx ] self.presRanges", "presRange) if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on pressure", "xintp: yintp = meas[yLab][0] else: yintp = f(xintp) rowDict[yLab] = yintp return rowDict", "700. , 750. , 800. , 850. , 900. , 950. , 1000.", "in sorted(dups): idxs.append(dup[1][0]) return idxs def format_xy(self, x, y): '''prep for interpolation''' x2,", "ValueError(\"Error: Unexpected response {}\".format(resp)) profiles = resp.json() return profiles def reject_profile(self, profile): if", "= [myArgs.minl, myArgs.maxl] basin = myArgs.basin starttdx = myArgs.starttdx #idxStr = str(myArgs.minl) +", "column to be interpolated xintp: the values to be interpolated ''' outArray =", "1100. , 1150. , 1200. , 1250. , 1300. , 1350. 
, 1412.5,", "appLocal: baseURL = 'http://localhost:3000' else: baseURL = 'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/' startDateQuery =", "and pres ranges to those specified in pLevelRange ''' self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx", "locs in dups if not np.isnan(key) or key not in {-999, None, np.NaN}", "= [ (key,locs) for key,locs in tally.items() ] dups = [ (key, locs)", "[x - 60, x + 60] for x in presLevels[45:] ] presRanges =", "presRanges] return presRanges @staticmethod def save_iDF(iDf, filename, tdx): iDf.date = pd.to_datetime(iDf.date) iDf.date =", "', '') surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]] shallowRanges = [ [x", "startTime logging.debug('end of log file for pressure level ranges: {}'.format(pLevelRange)) dtStr = 'time", "unique, non nan and non -999 indexes''' tally = defaultdict(list) for idx,item in", "= profile['measurements'] if len(meas) == 0: return None if not yLab in meas[0].keys():", "70. , 80. , 90. , 100. , 110. , 120. 
, 130.", "iDf.empty: with open(filename, 'a') as f: if tdx==0: iDf.to_csv(f, header=True) else: iDf.to_csv(f, header=False)", "as err: pdb.set_trace() logging.warning(err) raise Exception return f @staticmethod def make_pres_ranges(presLevels): \"\"\" Pressure", "help=\"name of log file\", type=str, nargs='?', default='pchipOceanSlices.log') myArgs = parser.parse_args() pLevelRange = [myArgs.minl,", "dataframe of interpolated values set at xintp for each profile xLab: the column", "endTime - startTime logging.debug('end of log file for pressure level ranges: {}'.format(pLevelRange)) dtStr", "for x in presLevels[45:] ] presRanges = surfaceRange + shallowRanges + mediumRanges +", "= 'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet))) for tdx, dates in enumerate(self.datesSet):", "columns xLab and yLab ''' try: f = PchipInterpolator(x, y, axis=1, extrapolate=False) except", "key not in {-999, None, np.NaN} ] idxs = [] for dup in", "'profile_id'}, axis=1) outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0) logging.debug('number of rows in df:", "+- 15 deep: 500 to 1050 dbar +- 30 abbysal: 1100 to 1975", "presRange intPresQuery = '&intPres=' + str(intPres) url = baseURL + startDateQuery + endDateQuery", "for idx in y_nan_idx] return xu, yu def make_interpolated_profile(self, profile, xintp, xLab, yLab):", "to be: '[lowPres,highPres]' Try to make the query small enough so as to", "else: baseURL = 'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/' startDateQuery = '?startDate=' + startDate endDateQuery", "def get_dates_set(period=30): \"\"\" create a set of dates split into n periods. period", "to make the query small enough so as to not pass the 15", "pass the 15 MB limit set by the database. 
''' if appLocal: baseURL", "be: '[lowPres,highPres]' Try to make the query small enough so as to not", "= int(np.floor(365/period)) datesSet = [] for year in range(2007, 2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01',", "__init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal = appLocal self.datesSet = self.get_dates_set() self.exceptBasin", "# Consider any status other than 2xx an error if not resp.status_code //", "in y_nan_idx] return xu, yu def make_interpolated_profile(self, profile, xintp, xLab, yLab): meas =", "'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG)", "pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not iDf.empty: with open(filename, 'a')", "= 'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName,", "= outDf.dropna(subset=[xLab, yLab], how='any', axis=0) logging.debug('number of rows in df: {}'.format(outDf.shape[0])) logging.debug('number of", "formated like so: 'YYYY-MM-DD' presRange should comprise of a string formatted to be:", "slice of ocean for a specified time range startDate and endDate should be", "5, x + 5] for x in presLevels[1:19] ] mediumRanges = [ [x", "import numpy as np import os, sys import xarray as xr from datetime", "ys = [y for _, y in sorted(xy)] xs = sorted(x) return xs,", "= requests.get(url) # Consider any status other than 2xx an error if not", "rowDict[xLab] = xintp if len(meas) == 1 and meas[xLab][0] == xintp: yintp =", "80. , 90. , 100. , 110. , 120. , 130. 
, 140.", "unique_idxs(seq): '''gets unique, non nan and non -999 indexes''' tally = defaultdict(list) for", "shallow: 10 to 182.5 dbar +- 5 medium: 200 to 462.5 dbar +-", "xs, ys @staticmethod def unique_idxs(seq): '''gets unique, non nan and non -999 indexes'''", "basinQuery url += '&reduceMeas=' + str(reduceMeas).lower() resp = requests.get(url) # Consider any status", ", 280. , 300. , 320. , 340. , 360. , 380. ,", "[x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds, datesSet)) return datesSet @staticmethod def get_ocean_slice(startDate, endDate, presRange,", ", 150. , 160. , 170. , 182.5, 200. , 220. , 240.", "{-999, None, np.NaN} ] except Exception as err: pdb.set_trace() print(err) xu = [xu[idx]", "462.5, 500. , 550. , 600. , 650. , 700. , 750. ,", "{-999, None, np.NaN} ] idxs = [] for dup in sorted(dups): idxs.append(dup[1][0]) return", "type=str, nargs='?', default='pchipOceanSlices.log') myArgs = parser.parse_args() pLevelRange = [myArgs.minl, myArgs.maxl] basin = myArgs.basin", "= self.presRanges[idx] self.intp_pres(xintp, presRange) if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\",", "= xr.open_dataset(rgFilename, decode_times=False) bnds = rg['PRESSURE_bnds'] presRanges = bnds.values.tolist() stringifyArray = lambda x:", "= [yu[idx] for idx in y_nan_idx] return xu, yu def make_interpolated_profile(self, profile, xintp,", "from db query self.qcKeep = set([1,2]) # used to filter bad positions and", "for idx,item in enumerate(seq): tally[item].append(idx) dups = [ (key,locs) for key,locs in tally.items()", "[ [x - 5, x + 5] for x in presLevels[1:19] ] mediumRanges", "1100 to 1975 dbar +- 60 \"\"\" stringifyArray = lambda x: str(x).replace(' ',", "x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds, datesSet)) return datesSet @staticmethod def get_ocean_slice(startDate, endDate, presRange, 
intPres,", "parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on pressure level\", type=float, nargs='?', default=2000) parser.add_argument(\"--minl\",", "= lambda x: str(x).replace(' ', '') presRanges = [stringifyArray(x) for x in presRanges]", "is in days. \"\"\" n_rows = int(np.floor(365/period)) datesSet = [] for year in", "x.append(meas[xLab]) y.append(meas[yLab]) return x, y @staticmethod def sort_list(x, y): '''sort x based off", "xu = [xu[idx] for idx in y_nan_idx] yu = [yu[idx] for idx in", "zip(x, y) ys = [y for _, y in sorted(xy)] xs = sorted(x)", "900. , 950. , 1000. , 1050. , 1100. , 1150. , 1200.", "%(levelname)s - %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of log file') startTime = datetime.now()", "170. , 182.5, 200. , 220. , 240. , 260. , 280. ,", "= [ 2.5, 10. , 20. , 30. , 40. , 50. ,", "750. , 800. , 850. , 900. , 950. , 1000. , 1050.", "nargs='?', default='pchipOceanSlices.log') myArgs = parser.parse_args() pLevelRange = [myArgs.minl, myArgs.maxl] basin = myArgs.basin starttdx", "self.appLocal, self.reduceMeas) except Exception as err: logging.warning('profiles not recieved: {}'.format(err)) continue logging.debug('xintp: {0}", "presLevels and pres ranges to those specified in pLevelRange ''' self.startIdx = self.presLevels.index(self.pLevelRange[0])", "if not np.isnan(key) or key not in {-999, None, np.NaN} ] idxs =", "dataframe containing columns xLab and yLab ''' try: f = PchipInterpolator(x, y, axis=1,", "y): '''prep for interpolation''' x2, y2 = self.sort_list(x, y) try: x_dup_idx = self.unique_idxs(x2)", "dates split into n periods. period is in days. 
\"\"\" n_rows = int(np.floor(365/period))", "shallowRanges + mediumRanges + deepRanges + abbysalRanges presRanges = [stringifyArray(x) for x in", "self.make_profile_interpolation_function(x, y) rowDict = profile.copy() del rowDict['measurements'] rowDict[xLab] = xintp if len(meas) ==", "logging.debug('inside main loop') logging.debug('running pressure level ranges: {}'.format(self.pLevelRange)) for idx, presLevel in enumerate(self.presLevels):", "set of dates split into n periods. period is in days. \"\"\" n_rows", "exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal = appLocal self.datesSet = self.get_dates_set() self.exceptBasin = exceptBasin self.starttdx", "Pressure ranges are based off of depths catagory surface: at 2.5 dbar +-", "Exception as err: pdb.set_trace() logging.warning(err) raise Exception return f @staticmethod def make_pres_ranges(presLevels): \"\"\"", "= sorted(x) return xs, ys @staticmethod def unique_idxs(seq): '''gets unique, non nan and", "try: x_dup_idx = self.unique_idxs(x2) xu = [x2[idx] for idx in x_dup_idx] yu =", "as pd import pdb import requests import numpy as np import os, sys", "surfaceRange + shallowRanges + mediumRanges + deepRanges + abbysalRanges presRanges = [stringifyArray(x) for", "datetime.now() dt = endTime - startTime logging.debug('end of log file for pressure level", ", 1300. , 1350. , 1412.5, 1500. , 1600. , 1700. , 1800.", "defined in RG climatology ''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename, decode_times=False) bnds", "] except Exception as err: pdb.set_trace() print(err) xu = [xu[idx] for idx in", "at xintp for each profile xLab: the column name for the interpolation input", "280. , 300. , 320. , 340. , 360. , 380. , 400.", "self.make_interpolated_profile(profile, xintp, xLab, yLab) if rowDict: outArray.append(rowDict) outDf = pd.DataFrame(outArray) outDf = outDf.rename({'_id':", ", 1200. , 1250. , 1300. , 1350. , 1412.5, 1500. 
, 1600.", "get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False): ''' query horizontal slice of ocean", "PchipOceanSlices(object): def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal = appLocal self.datesSet =", ", 1975., 2000.] self.pLevelRange = pLevelRange self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30):", "reduceMeas=False): ''' query horizontal slice of ocean for a specified time range startDate", "outArray = [] for profile in profiles: rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab)", "response {}\".format(resp)) profiles = resp.json() return profiles def reject_profile(self, profile): if not profile['position_qc']", "as err: pdb.set_trace() print(err) xu = [xu[idx] for idx in y_nan_idx] yu =", "query self.qcKeep = set([1,2]) # used to filter bad positions and dates self.basin", "iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation complete at time index: {}'.format(tdx)) timeTick =", "if appLocal: baseURL = 'http://localhost:3000' else: baseURL = 'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/' startDateQuery", "remove none -999 and none y_nan_idx =[idx for idx,key in enumerate(yu) if not", "iDf.date = iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not iDf.empty: with open(filename, 'a') as", "def reject_profile(self, profile): if not profile['position_qc'] in self.qcKeep: reject = True elif not", "[stringifyArray(x) for x in presRanges] return presRanges @staticmethod def save_iDF(iDf, filename, tdx): iDf.date", "xr from datetime import datetime, timedelta import logging from scipy.interpolate import PchipInterpolator import", "= self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas) except Exception as err: 
logging.warning('profiles", "self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal') except Exception as err: pdb.set_trace() logging.warning('error when interpolating psal')", "== 1 and meas[xLab][0] == xintp: yintp = meas[yLab][0] else: yintp = f(xintp)", "str(x).replace(' ', '') surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]] shallowRanges = [", "iDf.to_csv(f, header=True) else: iDf.to_csv(f, header=False) @staticmethod def record_to_array(measurements, xLab, yLab): x = []", "rg['PRESSURE_bnds'] presRanges = bnds.values.tolist() stringifyArray = lambda x: str(x).replace(' ', '') presRanges =", "logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of log file') startTime = datetime.now() pos = PchipOceanSlices(pLevelRange,", "from datetime import datetime, timedelta import logging from scipy.interpolate import PchipInterpolator import argparse", "idx in y_nan_idx] return xu, yu def make_interpolated_profile(self, profile, xintp, xLab, yLab): meas", "100 == 2: raise ValueError(\"Error: Unexpected response {}\".format(resp)) profiles = resp.json() return profiles", "for pressure level ranges: {}'.format(pLevelRange)) dtStr = 'time to complete: {} seconds'.format(dt.seconds) print(dtStr)", "yintp = f(xintp) rowDict[yLab] = yintp return rowDict def make_interpolated_df(self, profiles, xintp, xLab='pres',", "return outDf def intp_pres(self, xintp, presRange): if self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName", "iPsalFileName, tdx) logging.debug('interpolation complete at time index: {}'.format(tdx)) timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M'))", "profile['BASIN'] in self.exceptBasin: # ignores basins reject=True else: reject = False return reject", "medium: 200 to 462.5 dbar +- 15 deep: 500 to 1050 dbar +-", ", 300. , 320. , 340. , 360. , 380. , 400. 
,", "rowDict = profile.copy() del rowDict['measurements'] rowDict[xLab] = xintp if len(meas) == 1 and", "yLab) x, y = self.format_xy(x, y) if len(x) < 2: # pchip needs", "= rg['PRESSURE_bnds'] presRanges = bnds.values.tolist() stringifyArray = lambda x: str(x).replace(' ', '') presRanges", "160. , 170. , 182.5, 200. , 220. , 240. , 260. ,", "500. , 550. , 600. , 650. , 700. , 750. , 800.", "datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet))) for tdx, dates in enumerate(self.datesSet): if tdx < self.starttdx:", "] idxs = [] for dup in sorted(dups): idxs.append(dup[1][0]) return idxs def format_xy(self,", "in presRanges] return presRanges @staticmethod def make_rg_pres_ranges(): ''' uses pressure ranges defined in", "how='any', axis=0) logging.debug('number of rows in df: {}'.format(outDf.shape[0])) logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique())))", "', '') presRanges = [stringifyArray(x) for x in presRanges] return presRanges @staticmethod def", "# indian ocean only Set to None otherwise self.presLevels = [ 2.5, 10.", "rowDict def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'): ''' make a dataframe of interpolated", "1975., 2000.] self.pLevelRange = pLevelRange self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30): \"\"\"", "decode_times=False) bnds = rg['PRESSURE_bnds'] presRanges = bnds.values.tolist() stringifyArray = lambda x: str(x).replace(' ',", "= False #removes excess points from db query self.qcKeep = set([1,2]) # used", "True elif len(profile['measurements']) < 2: # cannot be interpolated reject = True elif", "horizontal slice of ocean for a specified time range startDate and endDate should", "in self.qcKeep: reject = True elif len(profile['measurements']) < 2: # cannot be interpolated", "300. , 320. , 340. , 360. , 380. , 400. 
, 420.", "iDf.to_csv(f, header=False) @staticmethod def record_to_array(measurements, xLab, yLab): x = [] y = []", ", 1250. , 1300. , 1350. , 1412.5, 1500. , 1600. , 1700.", "len(meas) == 0: return None if not yLab in meas[0].keys(): return None x,", "= resp.json() return profiles def reject_profile(self, profile): if not profile['position_qc'] in self.qcKeep: reject", "startDate and endDate should be a string formated like so: 'YYYY-MM-DD' presRange should", "self.presLevels = self.presLevels[ self.startIdx:self.endIdx ] self.presRanges = self.presRanges[ self.startIdx:self.endIdx ] def main(self): logging.debug('inside", "at time index: {}'.format(tdx)) timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt = timeTick-start logging.debug('completed", "self.record_to_array(meas, xLab, yLab) x, y = self.format_xy(x, y) if len(x) < 2: #", "header=True) else: iDf.to_csv(f, header=False) @staticmethod def record_to_array(measurements, xLab, yLab): x = [] y", "+ 15] for x in presLevels[19:33] ] deepRanges = [ [x - 30,", "endDate = dates try: sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas)", "x2, y2 = self.sort_list(x, y) try: x_dup_idx = self.unique_idxs(x2) xu = [x2[idx] for", "2: raise ValueError(\"Error: Unexpected response {}\".format(resp)) profiles = resp.json() return profiles def reject_profile(self,", "130. , 140. , 150. , 160. , 170. , 182.5, 200. 
,", "return None if not yLab in meas[0].keys(): return None x, y = self.record_to_array(meas,", "+ str(intPres) url = baseURL + startDateQuery + endDateQuery + presRangeQuery + intPresQuery", "creates interpolation function df is a dataframe containing columns xLab and yLab '''", "on pressure level\", type=float, nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end on pressure level\", type=float, nargs='?',", "for idx, presLevel in enumerate(self.presLevels): xintp = presLevel presRange = self.presRanges[idx] self.intp_pres(xintp, presRange)", "y = self.format_xy(x, y) if len(x) < 2: # pchip needs at least", "= self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[ self.startIdx:self.endIdx ] self.presRanges = self.presRanges[", ", 750. , 800. , 850. , 900. , 950. , 1000. ,", ", 110. , 120. , 130. , 140. , 150. , 160. ,", "enumerate(self.presLevels): xintp = presLevel presRange = self.presRanges[idx] self.intp_pres(xintp, presRange) if __name__ == '__main__':", "when interpolating temp') logging.warning(err) continue try: iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal') except", "stringifyArray = lambda x: str(x).replace(' ', '') presRanges = [stringifyArray(x) for x in", "pressure level ranges: {}'.format(self.pLevelRange)) for idx, presLevel in enumerate(self.presLevels): xintp = presLevel presRange", "=[idx for idx,key in enumerate(yu) if not key in {-999, None, np.NaN} ]", "if not iDf.empty: with open(filename, 'a') as f: if tdx==0: iDf.to_csv(f, header=True) else:", "sys import xarray as xr from datetime import datetime, timedelta import logging from", ", 30. , 40. , 50. , 60. , 70. , 80. 
,", "''' reduces presLevels and pres ranges to those specified in pLevelRange ''' self.startIdx", "462.5 dbar +- 15 deep: 500 to 1050 dbar +- 30 abbysal: 1100", "= self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp') except Exception as err: logging.warning('error when interpolating temp')", "for x in presLevels[33:45] ] abbysalRanges = [ [x - 60, x +", "None, np.NaN} ] idxs = [] for dup in sorted(dups): idxs.append(dup[1][0]) return idxs", "than 2xx an error if not resp.status_code // 100 == 2: raise ValueError(\"Error:", "dups = [ (key,locs) for key,locs in tally.items() ] dups = [ (key,", "%(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of log file') startTime = datetime.now() pos =", ", 1150. , 1200. , 1250. , 1300. , 1350. , 1412.5, 1500.", "{}\".format(resp)) profiles = resp.json() return profiles def reject_profile(self, profile): if not profile['position_qc'] in", "x + 60] for x in presLevels[45:] ] presRanges = surfaceRange + shallowRanges", "'?startDate=' + startDate endDateQuery = '&endDate=' + endDate presRangeQuery = '&presRange=' + presRange", "= [ (key, locs) for key, locs in dups if not np.isnan(key) or", "except Exception as err: pdb.set_trace() print(err) xu = [xu[idx] for idx in y_nan_idx]", "numpy as np import os, sys import xarray as xr from datetime import", "in interval: {}'.format(len(sliceProfiles))) try: iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp') except Exception as", "False return reject @staticmethod def make_profile_interpolation_function(x,y): ''' creates interpolation function df is a", "in pLevelRange ''' self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[ self.startIdx:self.endIdx", "to 182.5 dbar +- 5 medium: 200 to 462.5 dbar +- 15 deep:", "tally[item].append(idx) dups = [ (key,locs) for key,locs in tally.items() ] dups = 
[", "of dates:{}'.format(len(self.datesSet))) for tdx, dates in enumerate(self.datesSet): if tdx < self.starttdx: continue logging.debug('starting", "= argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on pressure level\", type=float, nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end", "based off of y''' xy = zip(x, y) ys = [y for _,", "enumerate(yu) if not key in {-999, None, np.NaN} ] except Exception as err:", "x = [] y = [] for meas in measurements: x.append(meas[xLab]) y.append(meas[yLab]) return", "filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of log file') startTime = datetime.now() pos = PchipOceanSlices(pLevelRange, basin=basin,", "= endTime - startTime logging.debug('end of log file for pressure level ranges: {}'.format(pLevelRange))", "for each profile xLab: the column name for the interpolation input x yLab:", "Exception as err: pdb.set_trace() logging.warning('error when interpolating psal') logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName, tdx)", ", 1050. , 1100. , 1150. , 1200. , 1250. , 1300. 
,", "datetime.now() pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main() endTime = datetime.now() dt", "''' if appLocal: baseURL = 'http://localhost:3000' else: baseURL = 'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/'", "for idx in y_nan_idx] yu = [yu[idx] for idx in y_nan_idx] return xu,", "idxs def format_xy(self, x, y): '''prep for interpolation''' x2, y2 = self.sort_list(x, y)", "file') startTime = datetime.now() pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main() endTime", "the column name for the interpolation input x yLab: the column to be", "dup in sorted(dups): idxs.append(dup[1][0]) return idxs def format_xy(self, x, y): '''prep for interpolation'''", "as np import os, sys import xarray as xr from datetime import datetime,", "ocean only Set to None otherwise self.presLevels = [ 2.5, 10. , 20.", "import os, sys import xarray as xr from datetime import datetime, timedelta import", "mediumRanges + deepRanges + abbysalRanges presRanges = [stringifyArray(x) for x in presRanges] return", "self.presRanges = self.presRanges[ self.startIdx:self.endIdx ] def main(self): logging.debug('inside main loop') logging.debug('running pressure level", "err: pdb.set_trace() logging.warning('error when interpolating psal') logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName,", "endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas) except Exception as err: logging.warning('profiles not recieved:", "'&presRange=' + presRange intPresQuery = '&intPres=' + str(intPres) url = baseURL + startDateQuery", "level ranges: {}'.format(self.pLevelRange)) for idx, presLevel in enumerate(self.presLevels): xintp = presLevel presRange =", "extrapolate=False) except Exception as err: pdb.set_trace() logging.warning(err) raise Exception return f @staticmethod def", "profiles 
interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf def intp_pres(self, xintp, presRange): if self.basin: iTempFileName =", "presRange, xintp, self.basin, self.appLocal, self.reduceMeas) except Exception as err: logging.warning('profiles not recieved: {}'.format(err))", "for a specified time range startDate and endDate should be a string formated", "be interpolated reject = True elif profile['BASIN'] in self.exceptBasin: # ignores basins reject=True", "RG climatology ''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename, decode_times=False) bnds = rg['PRESSURE_bnds']", "= set([1,2]) # used to filter bad positions and dates self.basin = basin", "1500. , 1600. , 1700. , 1800. , 1900. , 1975., 2000.] self.pLevelRange", "axis=1, extrapolate=False) except Exception as err: pdb.set_trace() logging.warning(err) raise Exception return f @staticmethod", "= 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet))) for tdx,", "positions and dates self.basin = basin # indian ocean only Set to None", "interpolation at time index: {}'.format(tdx)) startDate, endDate = dates try: sliceProfiles = self.get_ocean_slice(startDate,", "pLevelRange ''' self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[ self.startIdx:self.endIdx ]", "in presLevels[45:] ] presRanges = surfaceRange + shallowRanges + mediumRanges + deepRanges +", "self.startIdx:self.endIdx ] def main(self): logging.debug('inside main loop') logging.debug('running pressure level ranges: {}'.format(self.pLevelRange)) for", "in {-999, None, np.NaN} ] except Exception as err: pdb.set_trace() print(err) xu =", "profile): if not profile['position_qc'] in self.qcKeep: reject = True elif not profile['date_qc'] in", ", 650. , 700. , 750. , 800. , 850. , 900. 
,", "tdx): iDf.date = pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not iDf.empty:", "resp = requests.get(url) # Consider any status other than 2xx an error if", "y): '''sort x based off of y''' xy = zip(x, y) ys =", "logging.warning('error when interpolating psal') logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation", ", 360. , 380. , 400. , 420. , 440. , 462.5, 500.", "profile['position_qc'] in self.qcKeep: reject = True elif not profile['date_qc'] in self.qcKeep: reject =", "[ [x - 30, x + 30] for x in presLevels[33:45] ] abbysalRanges", "xintp if len(meas) == 1 and meas[xLab][0] == xintp: yintp = meas[yLab][0] else:", "datesSet = datesSet + yearSet keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet =", "%H:%M')) dt = timeTick-start logging.debug('completed run for psal {0} running for: {1}'.format(xintp, dt))", ", 950. , 1000. , 1050. , 1100. , 1150. , 1200. 
,", "except Exception as err: logging.warning('error when interpolating temp') logging.warning(err) continue try: iPsalDf =", "= 'http://localhost:3000' else: baseURL = 'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/' startDateQuery = '?startDate=' +", "try: sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas) except Exception as", "presRanges] return presRanges @staticmethod def make_rg_pres_ranges(): ''' uses pressure ranges defined in RG", "= myArgs.basin starttdx = myArgs.starttdx #idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl) #logFileName", "index: {}'.format(tdx)) timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt = timeTick-start logging.debug('completed run for", "running for: {1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self): ''' reduces presLevels and pres ranges to", "and non -999 indexes''' tally = defaultdict(list) for idx,item in enumerate(seq): tally[item].append(idx) dups", "timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt = timeTick-start logging.debug('completed run for psal {0}", "dbar +- 60 \"\"\" stringifyArray = lambda x: str(x).replace(' ', '') surfaceRange =", "tdx)) logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles))) try: iTempDf = self.make_interpolated_df(sliceProfiles, xintp,", "if not resp.status_code // 100 == 2: raise ValueError(\"Error: Unexpected response {}\".format(resp)) profiles", "to filter bad positions and dates self.basin = basin # indian ocean only", ", 140. , 150. , 160. , 170. , 182.5, 200. , 220.", "to those specified in pLevelRange ''' self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels", "600. , 650. , 700. , 750. , 800. , 850. 
, 900.", "'a') as f: if tdx==0: iDf.to_csv(f, header=True) else: iDf.to_csv(f, header=False) @staticmethod def record_to_array(measurements,", "'&reduceMeas=' + str(reduceMeas).lower() resp = requests.get(url) # Consider any status other than 2xx", "idx,item in enumerate(seq): tally[item].append(idx) dups = [ (key,locs) for key,locs in tally.items() ]", "default=2000) parser.add_argument(\"--minl\", help=\"end on pressure level\", type=float, nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter this basin\",", "self.appLocal = appLocal self.datesSet = self.get_dates_set() self.exceptBasin = exceptBasin self.starttdx = starttdx self.reduceMeas", "nan and non -999 indexes''' tally = defaultdict(list) for idx,item in enumerate(seq): tally[item].append(idx)", "and yLab ''' try: f = PchipInterpolator(x, y, axis=1, extrapolate=False) except Exception as", "excess points from db query self.qcKeep = set([1,2]) # used to filter bad", "if len(x) < 2: # pchip needs at least two points return None", "interpolated ''' outArray = [] for profile in profiles: rowDict = self.make_interpolated_profile(profile, xintp,", "for idx,key in enumerate(yu) if not key in {-999, None, np.NaN} ] except", "] self.presRanges = self.presRanges[ self.startIdx:self.endIdx ] def main(self): logging.debug('inside main loop') logging.debug('running pressure", "== xintp: yintp = meas[yLab][0] else: yintp = f(xintp) rowDict[yLab] = yintp return", "in self.qcKeep: reject = True elif not profile['date_qc'] in self.qcKeep: reject = True", "period is in days. 
\"\"\" n_rows = int(np.floor(365/period)) datesSet = [] for year", "182.5 dbar +- 5 medium: 200 to 462.5 dbar +- 15 deep: 500", "exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main() endTime = datetime.now() dt = endTime - startTime logging.debug('end", "xintp, xLab='pres', yLab='temp'): ''' make a dataframe of interpolated values set at xintp", "a string formatted to be: '[lowPres,highPres]' Try to make the query small enough", "df is a dataframe containing columns xLab and yLab ''' try: f =", "if len(meas) == 0: return None if not yLab in meas[0].keys(): return None", "log file\", type=str, nargs='?', default='pchipOceanSlices.log') myArgs = parser.parse_args() pLevelRange = [myArgs.minl, myArgs.maxl] basin", "import requests import numpy as np import os, sys import xarray as xr", "year in range(2007, 2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet = datesSet +", "a string formated like so: 'YYYY-MM-DD' presRange should comprise of a string formatted", ", 80. , 90. , 100. , 110. , 120. , 130. 
,", "uses pressure ranges defined in RG climatology ''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg =", "{0} on tdx: {1}'.format(xintp, tdx)) logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles))) try:", "rows in df: {}'.format(outDf.shape[0])) logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf def intp_pres(self,", "filter bad positions and dates self.basin = basin # indian ocean only Set", "basin # indian ocean only Set to None otherwise self.presLevels = [ 2.5,", "nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name of log file\", type=str, nargs='?', default='pchipOceanSlices.log') myArgs = parser.parse_args()", "help=\"end on pressure level\", type=float, nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter this basin\", type=str, nargs='?',", "f(xintp) rowDict[yLab] = yintp return rowDict def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'): '''", "interpolation input x yLab: the column to be interpolated xintp: the values to", "len(x) < 2: # pchip needs at least two points return None f", "+ 30] for x in presLevels[33:45] ] abbysalRanges = [ [x - 60,", "xintp, xLab, yLab) if rowDict: outArray.append(rowDict) outDf = pd.DataFrame(outArray) outDf = outDf.rename({'_id': 'profile_id'},", "containing columns xLab and yLab ''' try: f = PchipInterpolator(x, y, axis=1, extrapolate=False)", "'temp') except Exception as err: logging.warning('error when interpolating temp') logging.warning(err) continue try: iPsalDf", "def sort_list(x, y): '''sort x based off of y''' xy = zip(x, y)", "intPresQuery = '&intPres=' + str(intPres) url = baseURL + startDateQuery + endDateQuery +", "] deepRanges = [ [x - 30, x + 30] for x in", "def make_interpolated_profile(self, profile, xintp, xLab, yLab): meas = profile['measurements'] if len(meas) == 0:", "ranges are based off of depths catagory surface: at 2.5 dbar +- 2.5", "log 
file') startTime = datetime.now() pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main()", "tdx==0: iDf.to_csv(f, header=True) else: iDf.to_csv(f, header=False) @staticmethod def record_to_array(measurements, xLab, yLab): x =", "return profiles def reject_profile(self, profile): if not profile['position_qc'] in self.qcKeep: reject = True", "iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet))) for tdx, dates in", "presLevels[19:33] ] deepRanges = [ [x - 30, x + 30] for x", "+ presRange intPresQuery = '&intPres=' + str(intPres) url = baseURL + startDateQuery +", "for x in presRanges] return presRanges @staticmethod def make_rg_pres_ranges(): ''' uses pressure ranges", "100. , 110. , 120. , 130. , 140. , 150. , 160.", "if not profile['position_qc'] in self.qcKeep: reject = True elif not profile['date_qc'] in self.qcKeep:", "= presLevel presRange = self.presRanges[idx] self.intp_pres(xintp, presRange) if __name__ == '__main__': parser =", "len(meas) == 1 and meas[xLab][0] == xintp: yintp = meas[yLab][0] else: yintp =", "= xintp if len(meas) == 1 and meas[xLab][0] == xintp: yintp = meas[yLab][0]", "yLab): x = [] y = [] for meas in measurements: x.append(meas[xLab]) y.append(meas[yLab])", "self.datesSet = self.get_dates_set() self.exceptBasin = exceptBasin self.starttdx = starttdx self.reduceMeas = False #removes", "baseURL = 'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/' startDateQuery = '?startDate=' + startDate endDateQuery =", "] dups = [ (key, locs) for key, locs in dups if not", "in sorted(xy)] xs = sorted(x) return xs, ys @staticmethod def unique_idxs(seq): '''gets unique,", "datesSet = list(map(keepEnds, datesSet)) return datesSet @staticmethod def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None,", "axis=0) logging.debug('number of rows in df: {}'.format(outDf.shape[0])) 
logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique()))) return", "< 2: # cannot be interpolated reject = True elif profile['BASIN'] in self.exceptBasin:", "be a string formated like so: 'YYYY-MM-DD' presRange should comprise of a string", "import argparse from collections import OrderedDict, defaultdict class PchipOceanSlices(object): def __init__(self, pLevelRange, basin=None,", "5 medium: 200 to 462.5 dbar +- 15 deep: 500 to 1050 dbar", "intPres, basin=None, appLocal=None, reduceMeas=False): ''' query horizontal slice of ocean for a specified", "xintp, presRange): if self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else:", "np.NaN} ] idxs = [] for dup in sorted(dups): idxs.append(dup[1][0]) return idxs def", "be interpolated ''' outArray = [] for profile in profiles: rowDict = self.make_interpolated_profile(profile,", "database. ''' if appLocal: baseURL = 'http://localhost:3000' else: baseURL = 'https://argovis.colorado.edu' baseURL +=", "logging.warning(err) raise Exception return f @staticmethod def make_pres_ranges(presLevels): \"\"\" Pressure ranges are based", "(key, locs) for key, locs in dups if not np.isnan(key) or key not", "xLab, yLab): x = [] y = [] for meas in measurements: x.append(meas[xLab])", "self.starttdx = starttdx self.reduceMeas = False #removes excess points from db query self.qcKeep", "x, y @staticmethod def sort_list(x, y): '''sort x based off of y''' xy", "len(profile['measurements']) < 2: # cannot be interpolated reject = True elif profile['BASIN'] in", "are based off of depths catagory surface: at 2.5 dbar +- 2.5 shallow:", "[ (key,locs) for key,locs in tally.items() ] dups = [ (key, locs) for", "f = PchipInterpolator(x, y, axis=1, extrapolate=False) except Exception as err: pdb.set_trace() logging.warning(err) raise", "= appLocal self.datesSet = self.get_dates_set() self.exceptBasin 
= exceptBasin self.starttdx = starttdx self.reduceMeas =", "= PchipInterpolator(x, y, axis=1, extrapolate=False) except Exception as err: pdb.set_trace() logging.warning(err) raise Exception", "in RG climatology ''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename, decode_times=False) bnds =", "def save_iDF(iDf, filename, tdx): iDf.date = pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\"))", "intPresQuery if basin: basinQuery = '&basin=' + basin url += basinQuery url +=", "time index: {}'.format(tdx)) startDate, endDate = dates try: sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange,", "'pres', 'psal') except Exception as err: pdb.set_trace() logging.warning('error when interpolating psal') logging.warning(err) continue", "xintp, self.basin, self.appLocal, self.reduceMeas) except Exception as err: logging.warning('profiles not recieved: {}'.format(err)) continue", "[x - 30, x + 30] for x in presLevels[33:45] ] abbysalRanges =", "interpolated values set at xintp for each profile xLab: the column name for", "meas in measurements: x.append(meas[xLab]) y.append(meas[yLab]) return x, y @staticmethod def sort_list(x, y): '''sort", "for tdx, dates in enumerate(self.datesSet): if tdx < self.starttdx: continue logging.debug('starting interpolation at", "format_xy(self, x, y): '''prep for interpolation''' x2, y2 = self.sort_list(x, y) try: x_dup_idx", "150. , 160. , 170. , 182.5, 200. , 220. , 240. 
,", "'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp)", "based off of depths catagory surface: at 2.5 dbar +- 2.5 shallow: 10", "= self.record_to_array(meas, xLab, yLab) x, y = self.format_xy(x, y) if len(x) < 2:", "appLocal=None, reduceMeas=False): ''' query horizontal slice of ocean for a specified time range", "def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal = appLocal self.datesSet = self.get_dates_set()", "Exception as err: logging.warning('profiles not recieved: {}'.format(err)) continue logging.debug('xintp: {0} on tdx: {1}'.format(xintp,", "+ 5] for x in presLevels[1:19] ] mediumRanges = [ [x - 15,", ", 900. , 950. , 1000. , 1050. , 1100. , 1150. ,", "15] for x in presLevels[19:33] ] deepRanges = [ [x - 30, x", "startTime = datetime.now() pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main() endTime =", "basin url += basinQuery url += '&reduceMeas=' + str(reduceMeas).lower() resp = requests.get(url) #", "+ basin url += basinQuery url += '&reduceMeas=' + str(reduceMeas).lower() resp = requests.get(url)", "self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30): \"\"\" create a set of dates", "abbysalRanges presRanges = [stringifyArray(x) for x in presRanges] return presRanges @staticmethod def make_rg_pres_ranges():", "file\", type=str, nargs='?', default='pchipOceanSlices.log') myArgs = parser.parse_args() pLevelRange = [myArgs.minl, myArgs.maxl] basin =", "outDf.dropna(subset=[xLab, yLab], how='any', axis=0) logging.debug('number of rows in df: {}'.format(outDf.shape[0])) logging.debug('number of profiles", "[xu[idx] for idx in y_nan_idx] yu = [yu[idx] for idx in 
y_nan_idx] return", "logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles))) try: iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres',", "continue try: iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal') except Exception as err: pdb.set_trace()", "scipy.interpolate import PchipInterpolator import argparse from collections import OrderedDict, defaultdict class PchipOceanSlices(object): def", "@staticmethod def sort_list(x, y): '''sort x based off of y''' xy = zip(x,", "yintp = meas[yLab][0] else: yintp = f(xintp) rowDict[yLab] = yintp return rowDict def", "self.presRanges[idx] self.intp_pres(xintp, presRange) if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start", "presLevel presRange = self.presRanges[idx] self.intp_pres(xintp, presRange) if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__,", "presLevels[1:19] ] mediumRanges = [ [x - 15, x + 15] for x", "''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename, decode_times=False) bnds = rg['PRESSURE_bnds'] presRanges =", "enumerate(self.datesSet): if tdx < self.starttdx: continue logging.debug('starting interpolation at time index: {}'.format(tdx)) startDate,", "rg = xr.open_dataset(rgFilename, decode_times=False) bnds = rg['PRESSURE_bnds'] presRanges = bnds.values.tolist() stringifyArray = lambda", "= self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal') except Exception as err: pdb.set_trace() logging.warning('error when interpolating", "// 100 == 2: raise ValueError(\"Error: Unexpected response {}\".format(resp)) profiles = resp.json() return", "abbysalRanges = [ [x - 60, x + 60] for x in presLevels[45:]", "[] for year in range(2007, 2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet =", 
"'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now() logging.debug('number", "in {-999, None, np.NaN} ] idxs = [] for dup in sorted(dups): idxs.append(dup[1][0])", "not iDf.empty: with open(filename, 'a') as f: if tdx==0: iDf.to_csv(f, header=True) else: iDf.to_csv(f,", "self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30): \"\"\" create a set of dates split into", "PchipInterpolator import argparse from collections import OrderedDict, defaultdict class PchipOceanSlices(object): def __init__(self, pLevelRange,", "y = [] for meas in measurements: x.append(meas[xLab]) y.append(meas[yLab]) return x, y @staticmethod", "\"\"\" create a set of dates split into n periods. period is in", "1000. , 1050. , 1100. , 1150. , 1200. , 1250. , 1300.", "elif len(profile['measurements']) < 2: # cannot be interpolated reject = True elif profile['BASIN']", "appLocal=True) pos.main() endTime = datetime.now() dt = endTime - startTime logging.debug('end of log", "ocean for a specified time range startDate and endDate should be a string", "400. , 420. , 440. , 462.5, 500. , 550. , 600. 
,", "for profile in profiles: rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab) if rowDict: outArray.append(rowDict)", "profiles found in interval: {}'.format(len(sliceProfiles))) try: iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp') except", "myArgs.basin starttdx = myArgs.starttdx #idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl) #logFileName =", "x in presLevels[45:] ] presRanges = surfaceRange + shallowRanges + mediumRanges + deepRanges", "x_dup_idx = self.unique_idxs(x2) xu = [x2[idx] for idx in x_dup_idx] yu = [y2[idx]", "= profile.copy() del rowDict['measurements'] rowDict[xLab] = xintp if len(meas) == 1 and meas[xLab][0]", "lambda x: str(x).replace(' ', '') surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]] shallowRanges", "err: logging.warning('error when interpolating temp') logging.warning(err) continue try: iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres',", "Unexpected response {}\".format(resp)) profiles = resp.json() return profiles def reject_profile(self, profile): if not", "split into n periods. period is in days. \"\"\" n_rows = int(np.floor(365/period)) datesSet", "reject_profile(self, profile): if not profile['position_qc'] in self.qcKeep: reject = True elif not profile['date_qc']", "profiles def reject_profile(self, profile): if not profile['position_qc'] in self.qcKeep: reject = True elif", "260. , 280. , 300. , 320. , 340. , 360. 
, 380.", "not np.isnan(key) or key not in {-999, None, np.NaN} ] idxs = []", "self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[ self.startIdx:self.endIdx ] self.presRanges = self.presRanges[ self.startIdx:self.endIdx", "locs) for key, locs in dups if not np.isnan(key) or key not in", "argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on pressure level\", type=float, nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end on", "interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf def intp_pres(self, xintp, presRange): if self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp,", "'psal') except Exception as err: pdb.set_trace() logging.warning('error when interpolating psal') logging.warning(err) continue self.save_iDF(iTempDf,", "logging.debug('end of log file for pressure level ranges: {}'.format(pLevelRange)) dtStr = 'time to", "not profile['date_qc'] in self.qcKeep: reject = True elif len(profile['measurements']) < 2: # cannot", "if len(meas) == 1 and meas[xLab][0] == xintp: yintp = meas[yLab][0] else: yintp", "dt = timeTick-start logging.debug('completed run for psal {0} running for: {1}'.format(xintp, dt)) def", "Exception as err: logging.warning('error when interpolating temp') logging.warning(err) continue try: iPsalDf = self.make_interpolated_df(sliceProfiles,", "two points return None f = self.make_profile_interpolation_function(x, y) rowDict = profile.copy() del rowDict['measurements']", "time range startDate and endDate should be a string formated like so: 'YYYY-MM-DD'", "= [xu[idx] for idx in y_nan_idx] yu = [yu[idx] for idx in y_nan_idx]", "reject=True else: reject = False return reject @staticmethod def make_profile_interpolation_function(x,y): ''' creates interpolation", "800. , 850. , 900. , 950. , 1000. , 1050. 
, 1100.", "in enumerate(self.datesSet): if tdx < self.starttdx: continue logging.debug('starting interpolation at time index: {}'.format(tdx))", "= self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[ self.startIdx:self.endIdx ] self.presRanges = self.presRanges[ self.startIdx:self.endIdx ] def", "run for psal {0} running for: {1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self): ''' reduces presLevels", "y) ys = [y for _, y in sorted(xy)] xs = sorted(x) return", "import datetime, timedelta import logging from scipy.interpolate import PchipInterpolator import argparse from collections", "pos.main() endTime = datetime.now() dt = endTime - startTime logging.debug('end of log file", "into n periods. period is in days. \"\"\" n_rows = int(np.floor(365/period)) datesSet =", "in presLevels[33:45] ] abbysalRanges = [ [x - 60, x + 60] for", "''' creates interpolation function df is a dataframe containing columns xLab and yLab", "return presRanges @staticmethod def make_rg_pres_ranges(): ''' uses pressure ranges defined in RG climatology", "endTime = datetime.now() dt = endTime - startTime logging.debug('end of log file for", "reject = True elif profile['BASIN'] in self.exceptBasin: # ignores basins reject=True else: reject", "= zip(x, y) ys = [y for _, y in sorted(xy)] xs =", "outDf = outDf.rename({'_id': 'profile_id'}, axis=1) outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0) logging.debug('number of", "str(x).replace(' ', '') presRanges = [stringifyArray(x) for x in presRanges] return presRanges @staticmethod", "+ yearSet keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds, datesSet)) return", "datesSet)) return datesSet @staticmethod def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False): '''", "f = self.make_profile_interpolation_function(x, y) rowDict = profile.copy() del rowDict['measurements'] 
rowDict[xLab] = xintp if", "needs at least two points return None f = self.make_profile_interpolation_function(x, y) rowDict =", "def make_rg_pres_ranges(): ''' uses pressure ranges defined in RG climatology ''' rgFilename =", "profile['date_qc'] in self.qcKeep: reject = True elif len(profile['measurements']) < 2: # cannot be", "# pchip needs at least two points return None f = self.make_profile_interpolation_function(x, y)", "+ startDate endDateQuery = '&endDate=' + endDate presRangeQuery = '&presRange=' + presRange intPresQuery", "startDateQuery + endDateQuery + presRangeQuery + intPresQuery if basin: basinQuery = '&basin=' +", "endDate should be a string formated like so: 'YYYY-MM-DD' presRange should comprise of", "interpolated xintp: the values to be interpolated ''' outArray = [] for profile", "self.presLevels = [ 2.5, 10. , 20. , 30. , 40. , 50.", "[y for _, y in sorted(xy)] xs = sorted(x) return xs, ys @staticmethod", "self.get_dates_set() self.exceptBasin = exceptBasin self.starttdx = starttdx self.reduceMeas = False #removes excess points", "profiles, xintp, xLab='pres', yLab='temp'): ''' make a dataframe of interpolated values set at", "pdb.set_trace() logging.warning(err) raise Exception return f @staticmethod def make_pres_ranges(presLevels): \"\"\" Pressure ranges are", "interval: {}'.format(len(sliceProfiles))) try: iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp') except Exception as err:", "60 \"\"\" stringifyArray = lambda x: str(x).replace(' ', '') surfaceRange = [[presLevels[0] -", "= False return reject @staticmethod def make_profile_interpolation_function(x,y): ''' creates interpolation function df is", "= 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now()", "+ mediumRanges + deepRanges + abbysalRanges presRanges = [stringifyArray(x) for x in presRanges]", "profile.copy() del 
rowDict['measurements'] rowDict[xLab] = xintp if len(meas) == 1 and meas[xLab][0] ==", "list(map(keepEnds, datesSet)) return datesSet @staticmethod def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False):", "least two points return None f = self.make_profile_interpolation_function(x, y) rowDict = profile.copy() del", "baseURL = 'http://localhost:3000' else: baseURL = 'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/' startDateQuery = '?startDate='", "record_to_array(measurements, xLab, yLab): x = [] y = [] for meas in measurements:", "of ocean for a specified time range startDate and endDate should be a", "\"\"\" Pressure ranges are based off of depths catagory surface: at 2.5 dbar", "profiles = resp.json() return profiles def reject_profile(self, profile): if not profile['position_qc'] in self.qcKeep:", "limit set by the database. ''' if appLocal: baseURL = 'http://localhost:3000' else: baseURL", "err: pdb.set_trace() print(err) xu = [xu[idx] for idx in y_nan_idx] yu = [yu[idx]", "if basin: basinQuery = '&basin=' + basin url += basinQuery url += '&reduceMeas='", "30] for x in presLevels[33:45] ] abbysalRanges = [ [x - 60, x", "make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'): ''' make a dataframe of interpolated values set", "= pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not iDf.empty: with open(filename,", "baseURL += '/gridding/presSliceForInterpolation/' startDateQuery = '?startDate=' + startDate endDateQuery = '&endDate=' + endDate", "idx in y_nan_idx] yu = [yu[idx] for idx in y_nan_idx] return xu, yu", "None f = self.make_profile_interpolation_function(x, y) rowDict = profile.copy() del rowDict['measurements'] rowDict[xLab] = xintp", "measurements: x.append(meas[xLab]) y.append(meas[yLab]) return x, y @staticmethod def sort_list(x, y): '''sort x based", "import pandas as pd import pdb import requests import 
numpy as np import", "psal') logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation complete at time", "except Exception as err: pdb.set_trace() logging.warning('error when interpolating psal') logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName,", "pdb.set_trace() print(err) xu = [xu[idx] for idx in y_nan_idx] yu = [yu[idx] for", "is a dataframe containing columns xLab and yLab ''' try: f = PchipInterpolator(x,", ", 70. , 80. , 90. , 100. , 110. , 120. ,", "endDateQuery + presRangeQuery + intPresQuery if basin: basinQuery = '&basin=' + basin url", "182.5, 200. , 220. , 240. , 260. , 280. , 300. ,", "dbar +- 15 deep: 500 to 1050 dbar +- 30 abbysal: 1100 to", ", 1412.5, 1500. , 1600. , 1700. , 1800. , 1900. , 1975.,", "(key,locs) for key,locs in tally.items() ] dups = [ (key, locs) for key,", "none y_nan_idx =[idx for idx,key in enumerate(yu) if not key in {-999, None,", "to be interpolated xintp: the values to be interpolated ''' outArray = []", ", 380. , 400. , 420. , 440. , 462.5, 500. 
, 550.", "return xs, ys @staticmethod def unique_idxs(seq): '''gets unique, non nan and non -999", "myArgs.starttdx #idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT =", "sorted(x) return xs, ys @staticmethod def unique_idxs(seq): '''gets unique, non nan and non", "help=\"start time index\", type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name of log file\", type=str, nargs='?',", "return reject @staticmethod def make_profile_interpolation_function(x,y): ''' creates interpolation function df is a dataframe", "continue logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx)) logging.debug('number of profiles found in interval:", "x + 5] for x in presLevels[1:19] ] mediumRanges = [ [x -", "x: str(x).replace(' ', '') presRanges = [stringifyArray(x) for x in presRanges] return presRanges", "pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal = appLocal self.datesSet = self.get_dates_set() self.exceptBasin =", "+ str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s - %(name)s - %(levelname)s -", "yLab): meas = profile['measurements'] if len(meas) == 0: return None if not yLab", "self.basin) else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now() logging.debug('number of", "= self.format_xy(x, y) if len(x) < 2: # pchip needs at least two", "[ (key, locs) for key, locs in dups if not np.isnan(key) or key", "def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'): ''' make a dataframe of interpolated values", "except Exception as err: pdb.set_trace() logging.warning(err) raise Exception return f @staticmethod def make_pres_ranges(presLevels):", "not recieved: {}'.format(err)) continue logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx)) logging.debug('number of profiles", "for year in range(2007, 
2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet = datesSet", "'/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename, decode_times=False) bnds = rg['PRESSURE_bnds'] presRanges = bnds.values.tolist() stringifyArray =", "y_nan_idx] yu = [yu[idx] for idx in y_nan_idx] return xu, yu def make_interpolated_profile(self,", "+ str(reduceMeas).lower() resp = requests.get(url) # Consider any status other than 2xx an", "15 deep: 500 to 1050 dbar +- 30 abbysal: 1100 to 1975 dbar", "= [stringifyArray(x) for x in presRanges] return presRanges @staticmethod def save_iDF(iDf, filename, tdx):", "xintp, xLab, yLab): meas = profile['measurements'] if len(meas) == 0: return None if", "values to be interpolated ''' outArray = [] for profile in profiles: rowDict", "name for the interpolation input x yLab: the column to be interpolated xintp:", "Consider any status other than 2xx an error if not resp.status_code // 100", "logging.debug('starting interpolation at time index: {}'.format(tdx)) startDate, endDate = dates try: sliceProfiles =", "yintp return rowDict def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'): ''' make a dataframe", "start = datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet))) for tdx, dates in enumerate(self.datesSet): if tdx", "dt = endTime - startTime logging.debug('end of log file for pressure level ranges:", "index: {}'.format(tdx)) startDate, endDate = dates try: sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp,", "an error if not resp.status_code // 100 == 2: raise ValueError(\"Error: Unexpected response", "as xr from datetime import datetime, timedelta import logging from scipy.interpolate import PchipInterpolator", "dates in enumerate(self.datesSet): if tdx < self.starttdx: continue logging.debug('starting interpolation at time index:", "= [[presLevels[0] - 2.5, presLevels[0]+ 2.5]] shallowRanges = [ [x - 5, x", ", 
462.5, 500. , 550. , 600. , 650. , 700. , 750.", "make the query small enough so as to not pass the 15 MB", "= basin # indian ocean only Set to None otherwise self.presLevels = [", "type=float, nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter this basin\", type=str, nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start time", "key in {-999, None, np.NaN} ] except Exception as err: pdb.set_trace() print(err) xu", "basinQuery = '&basin=' + basin url += basinQuery url += '&reduceMeas=' + str(reduceMeas).lower()", "in enumerate(seq): tally[item].append(idx) dups = [ (key,locs) for key,locs in tally.items() ] dups", "in x_dup_idx] yu = [y2[idx] for idx in x_dup_idx] # remove none -999", "pandas as pd import pdb import requests import numpy as np import os,", "import xarray as xr from datetime import datetime, timedelta import logging from scipy.interpolate", "self.presRanges[ self.startIdx:self.endIdx ] def main(self): logging.debug('inside main loop') logging.debug('running pressure level ranges: {}'.format(self.pLevelRange))", "off of y''' xy = zip(x, y) ys = [y for _, y", "interpolation function df is a dataframe containing columns xLab and yLab ''' try:", "and meas[xLab][0] == xintp: yintp = meas[yLab][0] else: yintp = f(xintp) rowDict[yLab] =", "datetime, timedelta import logging from scipy.interpolate import PchipInterpolator import argparse from collections import", "self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation complete at time index: {}'.format(tdx)) timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d", "if self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName =", ", 440. , 462.5, 500. , 550. , 600. , 650. 
, 700.", "sorted(dups): idxs.append(dup[1][0]) return idxs def format_xy(self, x, y): '''prep for interpolation''' x2, y2", "x, y): '''prep for interpolation''' x2, y2 = self.sort_list(x, y) try: x_dup_idx =", "catagory surface: at 2.5 dbar +- 2.5 shallow: 10 to 182.5 dbar +-", "self.qcKeep: reject = True elif len(profile['measurements']) < 2: # cannot be interpolated reject", "in profiles: rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab) if rowDict: outArray.append(rowDict) outDf =", "profiles: rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab) if rowDict: outArray.append(rowDict) outDf = pd.DataFrame(outArray)", "bad positions and dates self.basin = basin # indian ocean only Set to", "recieved: {}'.format(err)) continue logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx)) logging.debug('number of profiles found", "= 'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/' startDateQuery = '?startDate=' + startDate endDateQuery = '&endDate='", "'__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on pressure level\", type=float, nargs='?', default=2000)", "dates:{}'.format(len(self.datesSet))) for tdx, dates in enumerate(self.datesSet): if tdx < self.starttdx: continue logging.debug('starting interpolation", "err: logging.warning('profiles not recieved: {}'.format(err)) continue logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx)) logging.debug('number", "in presLevels[19:33] ] deepRanges = [ [x - 30, x + 30] for", "2: # cannot be interpolated reject = True elif profile['BASIN'] in self.exceptBasin: #", "xLab='pres', yLab='temp'): ''' make a dataframe of interpolated values set at xintp for", "time index\", type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name of log file\", type=str, nargs='?', default='pchipOceanSlices.log')", "= [ [x - 15, x + 15] for x in 
presLevels[19:33] ]", "str(year)+'-12-31'), n_rows) datesSet = datesSet + yearSet keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')]", "[[presLevels[0] - 2.5, presLevels[0]+ 2.5]] shallowRanges = [ [x - 5, x +", "yLab: the column to be interpolated xintp: the values to be interpolated '''", "- 15, x + 15] for x in presLevels[19:33] ] deepRanges = [", "return xu, yu def make_interpolated_profile(self, profile, xintp, xLab, yLab): meas = profile['measurements'] if", "1700. , 1800. , 1900. , 1975., 2000.] self.pLevelRange = pLevelRange self.presRanges =", "meas[0].keys(): return None x, y = self.record_to_array(meas, xLab, yLab) x, y = self.format_xy(x,", "x based off of y''' xy = zip(x, y) ys = [y for", "indian ocean only Set to None otherwise self.presLevels = [ 2.5, 10. ,", "reject = True elif not profile['date_qc'] in self.qcKeep: reject = True elif len(profile['measurements'])", "to 462.5 dbar +- 15 deep: 500 to 1050 dbar +- 30 abbysal:", "-999 indexes''' tally = defaultdict(list) for idx,item in enumerate(seq): tally[item].append(idx) dups = [", "= [ [x - 5, x + 5] for x in presLevels[1:19] ]", "interpolating temp') logging.warning(err) continue try: iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal') except Exception", "# ignores basins reject=True else: reject = False return reject @staticmethod def make_profile_interpolation_function(x,y):", "argparse from collections import OrderedDict, defaultdict class PchipOceanSlices(object): def __init__(self, pLevelRange, basin=None, exceptBasin={None},", "= self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30): \"\"\" create a set of dates split", "= self.make_interpolated_profile(profile, xintp, xLab, yLab) if rowDict: outArray.append(rowDict) outDf = pd.DataFrame(outArray) outDf =", ", 100. , 110. , 120. , 130. , 140. , 150. 
,", "pres ranges to those specified in pLevelRange ''' self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx =", "datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt = timeTick-start logging.debug('completed run for psal {0} running for:", "= [] for year in range(2007, 2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet", "= parser.parse_args() pLevelRange = [myArgs.minl, myArgs.maxl] basin = myArgs.basin starttdx = myArgs.starttdx #idxStr", "= lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds, datesSet)) return datesSet @staticmethod def", "basins reject=True else: reject = False return reject @staticmethod def make_profile_interpolation_function(x,y): ''' creates", "interpolation''' x2, y2 = self.sort_list(x, y) try: x_dup_idx = self.unique_idxs(x2) xu = [x2[idx]", "tdx: {1}'.format(xintp, tdx)) logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles))) try: iTempDf =", "1975 dbar +- 60 \"\"\" stringifyArray = lambda x: str(x).replace(' ', '') surfaceRange", "logging.debug('number of rows in df: {}'.format(outDf.shape[0])) logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique()))) return outDf", ", 600. , 650. , 700. , 750. , 800. , 850. ,", ", 50. , 60. , 70. , 80. , 90. , 100. 
,", "endDate presRangeQuery = '&presRange=' + presRange intPresQuery = '&intPres=' + str(intPres) url =", "pd.DataFrame(outArray) outDf = outDf.rename({'_id': 'profile_id'}, axis=1) outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0) logging.debug('number", "mediumRanges = [ [x - 15, x + 15] for x in presLevels[19:33]", "[ [x - 15, x + 15] for x in presLevels[19:33] ] deepRanges", "2.5 dbar +- 2.5 shallow: 10 to 182.5 dbar +- 5 medium: 200", "= '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of", "iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal') except Exception as err: pdb.set_trace() logging.warning('error when", "- startTime logging.debug('end of log file for pressure level ranges: {}'.format(pLevelRange)) dtStr =", "15, x + 15] for x in presLevels[19:33] ] deepRanges = [ [x", "main(self): logging.debug('inside main loop') logging.debug('running pressure level ranges: {}'.format(self.pLevelRange)) for idx, presLevel in", "meas = profile['measurements'] if len(meas) == 0: return None if not yLab in", "profile, xintp, xLab, yLab): meas = profile['measurements'] if len(meas) == 0: return None", "profile in profiles: rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab) if rowDict: outArray.append(rowDict) outDf", "for key,locs in tally.items() ] dups = [ (key, locs) for key, locs", "x in presLevels[33:45] ] abbysalRanges = [ [x - 60, x + 60]", "type=float, nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end on pressure level\", type=float, nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter", "the 15 MB limit set by the database. 
''' if appLocal: baseURL =", "for interpolation''' x2, y2 = self.sort_list(x, y) try: x_dup_idx = self.unique_idxs(x2) xu =", "= self.presRanges[ self.startIdx:self.endIdx ] def main(self): logging.debug('inside main loop') logging.debug('running pressure level ranges:", "360. , 380. , 400. , 420. , 440. , 462.5, 500. ,", "[] for profile in profiles: rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab) if rowDict:", "specified time range startDate and endDate should be a string formated like so:", "@staticmethod def save_iDF(iDf, filename, tdx): iDf.date = pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y", "200. , 220. , 240. , 260. , 280. , 300. , 320.", "nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start time index\", type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name of log", "requests import numpy as np import os, sys import xarray as xr from", "str(intPres) url = baseURL + startDateQuery + endDateQuery + presRangeQuery + intPresQuery if", "appLocal=False): self.appLocal = appLocal self.datesSet = self.get_dates_set() self.exceptBasin = exceptBasin self.starttdx = starttdx", "startDateQuery = '?startDate=' + startDate endDateQuery = '&endDate=' + endDate presRangeQuery = '&presRange='", "'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start = datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet))) for tdx, dates", "440. , 462.5, 500. , 550. , 600. , 650. , 700. 
,", "of log file\", type=str, nargs='?', default='pchipOceanSlices.log') myArgs = parser.parse_args() pLevelRange = [myArgs.minl, myArgs.maxl]", "False #removes excess points from db query self.qcKeep = set([1,2]) # used to", "logging.warning(err) continue self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation complete at time index:", "= True elif profile['BASIN'] in self.exceptBasin: # ignores basins reject=True else: reject =", "#removes excess points from db query self.qcKeep = set([1,2]) # used to filter", "'''prep for interpolation''' x2, y2 = self.sort_list(x, y) try: x_dup_idx = self.unique_idxs(x2) xu", "logging.warning('error when interpolating temp') logging.warning(err) continue try: iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal')", "= self.presLevels[ self.startIdx:self.endIdx ] self.presRanges = self.presRanges[ self.startIdx:self.endIdx ] def main(self): logging.debug('inside main", "self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp) start", "# used to filter bad positions and dates self.basin = basin # indian", "of y''' xy = zip(x, y) ys = [y for _, y in", "idx, presLevel in enumerate(self.presLevels): xintp = presLevel presRange = self.presRanges[idx] self.intp_pres(xintp, presRange) if", ", 850. , 900. , 950. , 1000. , 1050. , 1100. ,", "by the database. ''' if appLocal: baseURL = 'http://localhost:3000' else: baseURL = 'https://argovis.colorado.edu'", "only Set to None otherwise self.presLevels = [ 2.5, 10. , 20. ,", "MB limit set by the database. 
''' if appLocal: baseURL = 'http://localhost:3000' else:", "= '?startDate=' + startDate endDateQuery = '&endDate=' + endDate presRangeQuery = '&presRange=' +", "the interpolation input x yLab: the column to be interpolated xintp: the values", "+- 30 abbysal: 1100 to 1975 dbar +- 60 \"\"\" stringifyArray = lambda", "a dataframe of interpolated values set at xintp for each profile xLab: the", "index\", type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name of log file\", type=str, nargs='?', default='pchipOceanSlices.log') myArgs", ", 1700. , 1800. , 1900. , 1975., 2000.] self.pLevelRange = pLevelRange self.presRanges", "logging from scipy.interpolate import PchipInterpolator import argparse from collections import OrderedDict, defaultdict class", "of log file') startTime = datetime.now() pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True)", "timedelta import logging from scipy.interpolate import PchipInterpolator import argparse from collections import OrderedDict,", "self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp') except Exception as err: logging.warning('error when interpolating temp') logging.warning(err)", "deep: 500 to 1050 dbar +- 30 abbysal: 1100 to 1975 dbar +-", "log file for pressure level ranges: {}'.format(pLevelRange)) dtStr = 'time to complete: {}", "= exceptBasin self.starttdx = starttdx self.reduceMeas = False #removes excess points from db", "in x_dup_idx] # remove none -999 and none y_nan_idx =[idx for idx,key in", "PchipInterpolator(x, y, axis=1, extrapolate=False) except Exception as err: pdb.set_trace() logging.warning(err) raise Exception return", "logging.debug('interpolation complete at time index: {}'.format(tdx)) timeTick = datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt =", "def make_pres_ranges(presLevels): \"\"\" Pressure ranges are based off of depths catagory surface: at", "dbar +- 2.5 shallow: 10 to 182.5 dbar +- 5 
medium: 200 to", "reduce_presLevels_and_presRanges(self): ''' reduces presLevels and pres ranges to those specified in pLevelRange '''", "myArgs = parser.parse_args() pLevelRange = [myArgs.minl, myArgs.maxl] basin = myArgs.basin starttdx = myArgs.starttdx", "rowDict: outArray.append(rowDict) outDf = pd.DataFrame(outArray) outDf = outDf.rename({'_id': 'profile_id'}, axis=1) outDf = outDf.dropna(subset=[xLab,", ", 240. , 260. , 280. , 300. , 320. , 340. ,", "= datetime.now() logging.debug('number of dates:{}'.format(len(self.datesSet))) for tdx, dates in enumerate(self.datesSet): if tdx <", "presRangeQuery + intPresQuery if basin: basinQuery = '&basin=' + basin url += basinQuery", "x, y = self.record_to_array(meas, xLab, yLab) x, y = self.format_xy(x, y) if len(x)", "on tdx: {1}'.format(xintp, tdx)) logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles))) try: iTempDf", "any status other than 2xx an error if not resp.status_code // 100 ==", "of log file for pressure level ranges: {}'.format(pLevelRange)) dtStr = 'time to complete:", "meas[yLab][0] else: yintp = f(xintp) rowDict[yLab] = yintp return rowDict def make_interpolated_df(self, profiles,", "140. , 150. , 160. , 170. , 182.5, 200. , 220. 
,", "None if not yLab in meas[0].keys(): return None x, y = self.record_to_array(meas, xLab,", "elif profile['BASIN'] in self.exceptBasin: # ignores basins reject=True else: reject = False return", "keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet = list(map(keepEnds, datesSet)) return datesSet @staticmethod", "- 5, x + 5] for x in presLevels[1:19] ] mediumRanges = [", "filename, tdx): iDf.date = pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not", "'&basin=' + basin url += basinQuery url += '&reduceMeas=' + str(reduceMeas).lower() resp =", "y''' xy = zip(x, y) ys = [y for _, y in sorted(xy)]", "range(2007, 2019): yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows) datesSet = datesSet + yearSet keepEnds", "if rowDict: outArray.append(rowDict) outDf = pd.DataFrame(outArray) outDf = outDf.rename({'_id': 'profile_id'}, axis=1) outDf =", "self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30): \"\"\" create a set of dates split into n", "1250. , 1300. , 1350. , 1412.5, 1500. , 1600. , 1700. ,", "should comprise of a string formatted to be: '[lowPres,highPres]' Try to make the", "starttdx = myArgs.starttdx #idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr)", "as to not pass the 15 MB limit set by the database. 
'''", "y_nan_idx] return xu, yu def make_interpolated_profile(self, profile, xintp, xLab, yLab): meas = profile['measurements']", "for x in presRanges] return presRanges @staticmethod def save_iDF(iDf, filename, tdx): iDf.date =", "= [x2[idx] for idx in x_dup_idx] yu = [y2[idx] for idx in x_dup_idx]", "a specified time range startDate and endDate should be a string formated like", "'') presRanges = [stringifyArray(x) for x in presRanges] return presRanges @staticmethod def save_iDF(iDf,", "xu, yu def make_interpolated_profile(self, profile, xintp, xLab, yLab): meas = profile['measurements'] if len(meas)", "x in presRanges] return presRanges @staticmethod def save_iDF(iDf, filename, tdx): iDf.date = pd.to_datetime(iDf.date)", ", 1900. , 1975., 2000.] self.pLevelRange = pLevelRange self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod", "= myArgs.starttdx #idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT", ", 320. , 340. , 360. , 380. , 400. , 420. 
,", "x_dup_idx] # remove none -999 and none y_nan_idx =[idx for idx,key in enumerate(yu)", "logging.warning(err) continue try: iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal') except Exception as err:", "x: str(x).replace(' ', '') surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]] shallowRanges =", "in tally.items() ] dups = [ (key, locs) for key, locs in dups", "# remove none -999 and none y_nan_idx =[idx for idx,key in enumerate(yu) if", "+= '&reduceMeas=' + str(reduceMeas).lower() resp = requests.get(url) # Consider any status other than", "FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start", "dt)) def reduce_presLevels_and_presRanges(self): ''' reduces presLevels and pres ranges to those specified in", "= [stringifyArray(x) for x in presRanges] return presRanges @staticmethod def make_rg_pres_ranges(): ''' uses", "{}'.format(len(outDf['profile_id'].unique()))) return outDf def intp_pres(self, xintp, presRange): if self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin)", "= datetime.now() logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M')) dt = timeTick-start logging.debug('completed run for psal {0} running", "for x in presLevels[1:19] ] mediumRanges = [ [x - 15, x +", "url += '&reduceMeas=' + str(reduceMeas).lower() resp = requests.get(url) # Consider any status other", "y) rowDict = profile.copy() del rowDict['measurements'] rowDict[xLab] = xintp if len(meas) == 1", "outDf def intp_pres(self, xintp, presRange): if self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName =", "[ [x - 60, x + 60] for x in presLevels[45:] ] presRanges", "yLab], how='any', axis=0) logging.debug('number of rows in df: {}'.format(outDf.shape[0])) logging.debug('number of profiles interpolated:", ", 160. , 170. , 182.5, 200. , 220. , 240. 
, 260.", "= timeTick-start logging.debug('completed run for psal {0} running for: {1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self):", "in measurements: x.append(meas[xLab]) y.append(meas[yLab]) return x, y @staticmethod def sort_list(x, y): '''sort x", "y) try: x_dup_idx = self.unique_idxs(x2) xu = [x2[idx] for idx in x_dup_idx] yu", "outArray.append(rowDict) outDf = pd.DataFrame(outArray) outDf = outDf.rename({'_id': 'profile_id'}, axis=1) outDf = outDf.dropna(subset=[xLab, yLab],", "string formated like so: 'YYYY-MM-DD' presRange should comprise of a string formatted to", "del rowDict['measurements'] rowDict[xLab] = xintp if len(meas) == 1 and meas[xLab][0] == xintp:", "xarray as xr from datetime import datetime, timedelta import logging from scipy.interpolate import", "try: iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp') except Exception as err: logging.warning('error when", "surface: at 2.5 dbar +- 2.5 shallow: 10 to 182.5 dbar +- 5", ", 20. , 30. , 40. , 50. , 60. , 70. ,", "endDateQuery = '&endDate=' + endDate presRangeQuery = '&presRange=' + presRange intPresQuery = '&intPres='", "parser.add_argument(\"--logFileName\", help=\"name of log file\", type=str, nargs='?', default='pchipOceanSlices.log') myArgs = parser.parse_args() pLevelRange =", "pLevelRange = [myArgs.minl, myArgs.maxl] basin = myArgs.basin starttdx = myArgs.starttdx #idxStr = str(myArgs.minl)", "periods. period is in days. 
\"\"\" n_rows = int(np.floor(365/period)) datesSet = [] for", "= [] for meas in measurements: x.append(meas[xLab]) y.append(meas[yLab]) return x, y @staticmethod def", "#idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl) #logFileName = 'pchipOceanSlices{}.log'.format(idxStr) FORMAT = '%(asctime)s", "status other than 2xx an error if not resp.status_code // 100 == 2:", "x in presLevels[1:19] ] mediumRanges = [ [x - 15, x + 15]", "self.qcKeep = set([1,2]) # used to filter bad positions and dates self.basin =", "climatology ''' rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename, decode_times=False) bnds = rg['PRESSURE_bnds'] presRanges", "[myArgs.minl, myArgs.maxl] basin = myArgs.basin starttdx = myArgs.starttdx #idxStr = str(myArgs.minl) + ':'", "non nan and non -999 indexes''' tally = defaultdict(list) for idx,item in enumerate(seq):", "points return None f = self.make_profile_interpolation_function(x, y) rowDict = profile.copy() del rowDict['measurements'] rowDict[xLab]", "those specified in pLevelRange ''' self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels =", "idxs = [] for dup in sorted(dups): idxs.append(dup[1][0]) return idxs def format_xy(self, x,", "of depths catagory surface: at 2.5 dbar +- 2.5 shallow: 10 to 182.5", "= 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) else: iTempFileName = 'iTempData_pres_{}.csv'.format(xintp) iPsalFileName =", "on pressure level\", type=float, nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter this basin\", type=str, nargs='?', default=None)", "n_rows) datesSet = datesSet + yearSet keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')] datesSet", "xr.open_dataset(rgFilename, decode_times=False) bnds = rg['PRESSURE_bnds'] presRanges = bnds.values.tolist() 
stringifyArray = lambda x: str(x).replace('", "2.5, presLevels[0]+ 2.5]] shallowRanges = [ [x - 5, x + 5] for", "+= '/gridding/presSliceForInterpolation/' startDateQuery = '?startDate=' + startDate endDateQuery = '&endDate=' + endDate presRangeQuery", "in days. \"\"\" n_rows = int(np.floor(365/period)) datesSet = [] for year in range(2007,", "def unique_idxs(seq): '''gets unique, non nan and non -999 indexes''' tally = defaultdict(list)", "level\", type=float, nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end on pressure level\", type=float, nargs='?', default=1975) parser.add_argument(\"--basin\",", "def make_profile_interpolation_function(x,y): ''' creates interpolation function df is a dataframe containing columns xLab", "self.starttdx: continue logging.debug('starting interpolation at time index: {}'.format(tdx)) startDate, endDate = dates try:", "x in presLevels[19:33] ] deepRanges = [ [x - 30, x + 30]", "40. , 50. , 60. , 70. , 80. , 90. , 100.", "return None f = self.make_profile_interpolation_function(x, y) rowDict = profile.copy() del rowDict['measurements'] rowDict[xLab] =", "make a dataframe of interpolated values set at xintp for each profile xLab:", "850. , 900. , 950. , 1000. , 1050. , 1100. 
, 1150.", "- 30, x + 30] for x in presLevels[33:45] ] abbysalRanges = [", "endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False): ''' query horizontal slice of ocean for", "2.5]] shallowRanges = [ [x - 5, x + 5] for x in", "none -999 and none y_nan_idx =[idx for idx,key in enumerate(yu) if not key", "'''gets unique, non nan and non -999 indexes''' tally = defaultdict(list) for idx,item", "self.reduceMeas = False #removes excess points from db query self.qcKeep = set([1,2]) #", "function df is a dataframe containing columns xLab and yLab ''' try: f", "and endDate should be a string formated like so: 'YYYY-MM-DD' presRange should comprise", "self.save_iDF(iTempDf, iTempFileName, tdx) self.save_iDF(iPsalDf, iPsalFileName, tdx) logging.debug('interpolation complete at time index: {}'.format(tdx)) timeTick", "reject = False return reject @staticmethod def make_profile_interpolation_function(x,y): ''' creates interpolation function df", "meas[xLab][0] == xintp: yintp = meas[yLab][0] else: yintp = f(xintp) rowDict[yLab] = yintp", "presRanges = bnds.values.tolist() stringifyArray = lambda x: str(x).replace(' ', '') presRanges = [stringifyArray(x)", "not key in {-999, None, np.NaN} ] except Exception as err: pdb.set_trace() print(err)", "{}'.format(self.pLevelRange)) for idx, presLevel in enumerate(self.presLevels): xintp = presLevel presRange = self.presRanges[idx] self.intp_pres(xintp,", "+- 5 medium: 200 to 462.5 dbar +- 15 deep: 500 to 1050", "= [y for _, y in sorted(xy)] xs = sorted(x) return xs, ys", "yLab ''' try: f = PchipInterpolator(x, y, axis=1, extrapolate=False) except Exception as err:", "basin=None, appLocal=None, reduceMeas=False): ''' query horizontal slice of ocean for a specified time", "PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main() endTime = datetime.now() dt = endTime -", "basin: basinQuery = '&basin=' + basin url += basinQuery url += '&reduceMeas=' +", "xLab, yLab) x, y = 
self.format_xy(x, y) if len(x) < 2: # pchip", "self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[ self.startIdx:self.endIdx ] self.presRanges = self.presRanges[ self.startIdx:self.endIdx ]", "not profile['position_qc'] in self.qcKeep: reject = True elif not profile['date_qc'] in self.qcKeep: reject", "d: d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not iDf.empty: with open(filename, 'a') as f: if tdx==0:", "reduces presLevels and pres ranges to those specified in pLevelRange ''' self.startIdx =", "yu = [yu[idx] for idx in y_nan_idx] return xu, yu def make_interpolated_profile(self, profile,", "] mediumRanges = [ [x - 15, x + 15] for x in", "'https://argovis.colorado.edu' baseURL += '/gridding/presSliceForInterpolation/' startDateQuery = '?startDate=' + startDate endDateQuery = '&endDate=' +", "dates self.basin = basin # indian ocean only Set to None otherwise self.presLevels", "[y2[idx] for idx in x_dup_idx] # remove none -999 and none y_nan_idx =[idx", ", 182.5, 200. , 220. , 240. , 260. , 280. 
, 300.", "xintp: the values to be interpolated ''' outArray = [] for profile in", "datetime import datetime, timedelta import logging from scipy.interpolate import PchipInterpolator import argparse from", "%H:%M:%S\")) if not iDf.empty: with open(filename, 'a') as f: if tdx==0: iDf.to_csv(f, header=True)", "== '__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on pressure level\", type=float, nargs='?',", "starttdx=starttdx, appLocal=True) pos.main() endTime = datetime.now() dt = endTime - startTime logging.debug('end of", "= '/home/tyler/Desktop/RG_ArgoClim_Temp.nc' rg = xr.open_dataset(rgFilename, decode_times=False) bnds = rg['PRESSURE_bnds'] presRanges = bnds.values.tolist() stringifyArray", "'''sort x based off of y''' xy = zip(x, y) ys = [y", "if tdx < self.starttdx: continue logging.debug('starting interpolation at time index: {}'.format(tdx)) startDate, endDate", "+ startDateQuery + endDateQuery + presRangeQuery + intPresQuery if basin: basinQuery = '&basin='", "self.startIdx = self.presLevels.index(self.pLevelRange[0]) self.endIdx = self.presLevels.index(self.pLevelRange[1]) self.presLevels = self.presLevels[ self.startIdx:self.endIdx ] self.presRanges =", "set by the database. 
''' if appLocal: baseURL = 'http://localhost:3000' else: baseURL =", "- %(name)s - %(levelname)s - %(message)s' logging.basicConfig(format=FORMAT, filename=myArgs.logFileName, level=logging.DEBUG) logging.debug('Start of log file')", "+ shallowRanges + mediumRanges + deepRanges + abbysalRanges presRanges = [stringifyArray(x) for x", "logging.warning('profiles not recieved: {}'.format(err)) continue logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx)) logging.debug('number of", "presRanges @staticmethod def save_iDF(iDf, filename, tdx): iDf.date = pd.to_datetime(iDf.date) iDf.date = iDf.date.apply(lambda d:", "resp.json() return profiles def reject_profile(self, profile): if not profile['position_qc'] in self.qcKeep: reject =", "self.startIdx:self.endIdx ] self.presRanges = self.presRanges[ self.startIdx:self.endIdx ] def main(self): logging.debug('inside main loop') logging.debug('running", "the values to be interpolated ''' outArray = [] for profile in profiles:", "used to filter bad positions and dates self.basin = basin # indian ocean", "os, sys import xarray as xr from datetime import datetime, timedelta import logging", "= '&presRange=' + presRange intPresQuery = '&intPres=' + str(intPres) url = baseURL +", "{}'.format(tdx)) startDate, endDate = dates try: sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin,", "of interpolated values set at xintp for each profile xLab: the column name", "{0} running for: {1}'.format(xintp, dt)) def reduce_presLevels_and_presRanges(self): ''' reduces presLevels and pres ranges", "d.strftime(\"%d-%b-%Y %H:%M:%S\")) if not iDf.empty: with open(filename, 'a') as f: if tdx==0: iDf.to_csv(f,", "presLevel in enumerate(self.presLevels): xintp = presLevel presRange = self.presRanges[idx] self.intp_pres(xintp, presRange) if __name__", "pLevelRange self.presRanges = self.make_rg_pres_ranges() self.reduce_presLevels_and_presRanges() @staticmethod def get_dates_set(period=30): \"\"\" create a set 
of", "intp_pres(self, xintp, presRange): if self.basin: iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin) iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin)", "off of depths catagory surface: at 2.5 dbar +- 2.5 shallow: 10 to", "= PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main() endTime = datetime.now() dt = endTime", "in presRanges] return presRanges @staticmethod def save_iDF(iDf, filename, tdx): iDf.date = pd.to_datetime(iDf.date) iDf.date", "formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"--maxl\", help=\"start on pressure level\", type=float, nargs='?', default=2000) parser.add_argument(\"--minl\", help=\"end on pressure", "basin\", type=str, nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start time index\", type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\", help=\"name", "values set at xintp for each profile xLab: the column name for the", "for idx in x_dup_idx] # remove none -999 and none y_nan_idx =[idx for", "print(err) xu = [xu[idx] for idx in y_nan_idx] yu = [yu[idx] for idx", "this basin\", type=str, nargs='?', default=None) parser.add_argument(\"--starttdx\", help=\"start time index\", type=int, nargs='?', default=0) parser.add_argument(\"--logFileName\",", "] presRanges = surfaceRange + shallowRanges + mediumRanges + deepRanges + abbysalRanges presRanges", "''' make a dataframe of interpolated values set at xintp for each profile", "Try to make the query small enough so as to not pass the", "650. , 700. , 750. , 800. , 850. , 900. 
, 950.", "str(reduceMeas).lower() resp = requests.get(url) # Consider any status other than 2xx an error", "header=False) @staticmethod def record_to_array(measurements, xLab, yLab): x = [] y = [] for", "[x - 15, x + 15] for x in presLevels[19:33] ] deepRanges =", "startDate endDateQuery = '&endDate=' + endDate presRangeQuery = '&presRange=' + presRange intPresQuery =", "30 abbysal: 1100 to 1975 dbar +- 60 \"\"\" stringifyArray = lambda x:", "import OrderedDict, defaultdict class PchipOceanSlices(object): def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False): self.appLocal", "= '&endDate=' + endDate presRangeQuery = '&presRange=' + presRange intPresQuery = '&intPres=' +", "logging.debug('running pressure level ranges: {}'.format(self.pLevelRange)) for idx, presLevel in enumerate(self.presLevels): xintp = presLevel", "True elif not profile['date_qc'] in self.qcKeep: reject = True elif len(profile['measurements']) < 2:", "= datetime.now() dt = endTime - startTime logging.debug('end of log file for pressure", "= datetime.now() pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True) pos.main() endTime = datetime.now()", "elif not profile['date_qc'] in self.qcKeep: reject = True elif len(profile['measurements']) < 2: #", "np.NaN} ] except Exception as err: pdb.set_trace() print(err) xu = [xu[idx] for idx", ", 340. , 360. , 380. , 400. , 420. , 440. ,", ", 170. , 182.5, 200. , 220. , 240. , 260. 
, 280.", "pressure level\", type=float, nargs='?', default=1975) parser.add_argument(\"--basin\", help=\"filter this basin\", type=str, nargs='?', default=None) parser.add_argument(\"--starttdx\",", "y) if len(x) < 2: # pchip needs at least two points return", "rowDict[yLab] = yintp return rowDict def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'): ''' make", "xintp for each profile xLab: the column name for the interpolation input x", "in presLevels[1:19] ] mediumRanges = [ [x - 15, x + 15] for", "or key not in {-999, None, np.NaN} ] idxs = [] for dup", "stringifyArray = lambda x: str(x).replace(' ', '') surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+", "0: return None if not yLab in meas[0].keys(): return None x, y =", "- 60, x + 60] for x in presLevels[45:] ] presRanges = surfaceRange", "presRanges @staticmethod def make_rg_pres_ranges(): ''' uses pressure ranges defined in RG climatology '''", "err: pdb.set_trace() logging.warning(err) raise Exception return f @staticmethod def make_pres_ranges(presLevels): \"\"\" Pressure ranges", "def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False): ''' query horizontal slice of", "1200. , 1250. , 1300. , 1350. , 1412.5, 1500. , 1600. ,", "loop') logging.debug('running pressure level ranges: {}'.format(self.pLevelRange)) for idx, presLevel in enumerate(self.presLevels): xintp =", "if not key in {-999, None, np.NaN} ] except Exception as err: pdb.set_trace()" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "one page of role grant list \"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit) def", "KIND, either express or implied. # See the License for the specific language", "markers for role grants list \"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit) def list_global_roles_for_user(self,", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "= logging.getLogger(__name__) # pylint: disable=C0103 class Manager(object): def __init__(self): self.driver = api.ROLE #", "marker, limit) def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit): \"\"\" Calculate pagination markers for", "License. 
# You may obtain a copy of the License at # #", "import keystone.backends.api as api logger = logging.getLogger(__name__) # pylint: disable=C0103 class Manager(object): def", "tenant_id) def rolegrant_list_by_role(self, role_id): return self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id, role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id,", "\"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit) def list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self,", "role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id) def list_role_grants(self,", "def rolegrant_get_page(self, user_id, tenant_id, marker, limit): \"\"\" Get one page of role grant", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "and # limitations under the License. \"\"\" Role-Grant manager module \"\"\" import logging", "compliance with the License. # You may obtain a copy of the License", "marker, limit) def list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id,", "api.ROLE # # Role-Grant Methods # def rolegrant_get_page(self, user_id, tenant_id, marker, limit): \"\"\"", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2011 OpenStack LLC. 
# #", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "tenant_id) def rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id) def list_role_grants(self, role_id, user_id, tenant_id): return self.driver.list_role_grants(role_id,", "you may not use this file except in compliance with the License. #", "return self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id, role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def rolegrant_delete(self,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "governing permissions and # limitations under the License. \"\"\" Role-Grant manager module \"\"\"", "user_id): return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self, role_id):", "list \"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit) def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit):", "ANY KIND, either express or implied. # See the License for the specific", "role_id, tenant_id) def rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id) def list_role_grants(self, role_id, user_id, tenant_id): return", "= api.ROLE # # Role-Grant Methods # def rolegrant_get_page(self, user_id, tenant_id, marker, limit):", "\"\"\" import logging import keystone.backends.api as api logger = logging.getLogger(__name__) # pylint: disable=C0103", "tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2011 OpenStack LLC. # # Licensed", "in compliance with the License. 
# You may obtain a copy of the", "return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self, role_id): return self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id, role_id, tenant_id):", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "limitations under the License. \"\"\" Role-Grant manager module \"\"\" import logging import keystone.backends.api", "self.driver = api.ROLE # # Role-Grant Methods # def rolegrant_get_page(self, user_id, tenant_id, marker,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "for the specific language governing permissions and # limitations under the License. \"\"\"", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "Get one page of role grant list \"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit)", "specific language governing permissions and # limitations under the License. 
\"\"\" Role-Grant manager", "limit) def list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id)", "See the License for the specific language governing permissions and # limitations under", "disable=C0103 class Manager(object): def __init__(self): self.driver = api.ROLE # # Role-Grant Methods #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit) def list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id, tenant_id):", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit): \"\"\" Calculate pagination markers for role grants", "# # Role-Grant Methods # def rolegrant_get_page(self, user_id, tenant_id, marker, limit): \"\"\" Get", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "tenant_id, marker, limit): \"\"\" Get one page of role grant list \"\"\" return", "Calculate pagination markers for role grants list \"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "OF ANY KIND, either express or implied. 
# See the License for the", "logging import keystone.backends.api as api logger = logging.getLogger(__name__) # pylint: disable=C0103 class Manager(object):", "def list_tenant_roles_for_user(self, user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self, role_id): return self.driver.rolegrant_list_by_role(role_id) def", "2.0 (the \"License\"); # you may not use this file except in compliance", "self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self, role_id): return self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id, role_id, tenant_id): return", "limit): \"\"\" Get one page of role grant list \"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id,", "of role grant list \"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit) def rolegrant_get_page_markers(self, user_id,", "# you may not use this file except in compliance with the License.", "shiftwidth=4 softtabstop=4 # # Copyright (C) 2011 OpenStack LLC. # # Licensed under", "__init__(self): self.driver = api.ROLE # # Role-Grant Methods # def rolegrant_get_page(self, user_id, tenant_id,", "agreed to in writing, software # distributed under the License is distributed on", "class Manager(object): def __init__(self): self.driver = api.ROLE # # Role-Grant Methods # def", "import logging import keystone.backends.api as api logger = logging.getLogger(__name__) # pylint: disable=C0103 class", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "role_id): return self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id, role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def", "rolegrant_get_by_ids(self, user_id, role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id)", "def __init__(self): self.driver = api.ROLE # # Role-Grant Methods # def rolegrant_get_page(self, user_id,", "\"\"\" Get one page of role grant list \"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id, marker,", "tenant_id, marker, limit) def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit): \"\"\" Calculate pagination markers", "Methods # def rolegrant_get_page(self, user_id, tenant_id, marker, limit): \"\"\" Get one page of", "def list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def", "(the \"License\"); # you may not use this file except in compliance with", "# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2011 OpenStack LLC. #", "\"\"\" Calculate pagination markers for role grants list \"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker,", "# # Unless required by applicable law or agreed to in writing, software", "language governing permissions and # limitations under the License. \"\"\" Role-Grant manager module", "# Copyright (C) 2011 OpenStack LLC. # # Licensed under the Apache License,", "def rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id) def list_role_grants(self, role_id, user_id, tenant_id): return self.driver.list_role_grants(role_id, user_id,", "express or implied. 
# See the License for the specific language governing permissions", "user_id, role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id) def", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "api logger = logging.getLogger(__name__) # pylint: disable=C0103 class Manager(object): def __init__(self): self.driver =", "except in compliance with the License. # You may obtain a copy of", "# limitations under the License. \"\"\" Role-Grant manager module \"\"\" import logging import", "user_id, tenant_id, marker, limit): \"\"\" Calculate pagination markers for role grants list \"\"\"", "(C) 2011 OpenStack LLC. # # Licensed under the Apache License, Version 2.0", "by applicable law or agreed to in writing, software # distributed under the", "the License. \"\"\" Role-Grant manager module \"\"\" import logging import keystone.backends.api as api", "2011 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the", "Copyright (C) 2011 OpenStack LLC. # # Licensed under the Apache License, Version", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self, role_id): return self.driver.rolegrant_list_by_role(role_id)", "limit) def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit): \"\"\" Calculate pagination markers for role", "either express or implied. # See the License for the specific language governing", "permissions and # limitations under the License. 
\"\"\" Role-Grant manager module \"\"\" import", "tenant_id, marker, limit): \"\"\" Calculate pagination markers for role grants list \"\"\" return", "return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit) def list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id,", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "user_id, tenant_id, marker, limit): \"\"\" Get one page of role grant list \"\"\"", "pylint: disable=C0103 class Manager(object): def __init__(self): self.driver = api.ROLE # # Role-Grant Methods", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self, role_id): return self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id,", "logging.getLogger(__name__) # pylint: disable=C0103 class Manager(object): def __init__(self): self.driver = api.ROLE # #", "file except in compliance with the License. 
# You may obtain a copy", "limit): \"\"\" Calculate pagination markers for role grants list \"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id,", "logger = logging.getLogger(__name__) # pylint: disable=C0103 class Manager(object): def __init__(self): self.driver = api.ROLE", "tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self, role_id): return self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id, role_id,", "the specific language governing permissions and # limitations under the License. \"\"\" Role-Grant", "list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self, role_id): return", "License. \"\"\" Role-Grant manager module \"\"\" import logging import keystone.backends.api as api logger", "rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit): \"\"\" Calculate pagination markers for role grants list", "the License. 
# You may obtain a copy of the License at #", "tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id) def list_role_grants(self, role_id,", "keystone.backends.api as api logger = logging.getLogger(__name__) # pylint: disable=C0103 class Manager(object): def __init__(self):", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "\"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit) def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit): \"\"\"", "role grant list \"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit) def rolegrant_get_page_markers(self, user_id, tenant_id,", "Manager(object): def __init__(self): self.driver = api.ROLE # # Role-Grant Methods # def rolegrant_get_page(self,", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "# # Copyright (C) 2011 OpenStack LLC. # # Licensed under the Apache", "implied. # See the License for the specific language governing permissions and #", "under the License. 
\"\"\" Role-Grant manager module \"\"\" import logging import keystone.backends.api as", "\"License\"); # you may not use this file except in compliance with the", "grants list \"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit) def list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id)", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "rolegrant_list_by_role(self, role_id): return self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id, role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id)", "page of role grant list \"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit) def rolegrant_get_page_markers(self,", "def rolegrant_get_by_ids(self, user_id, role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def rolegrant_delete(self, grant_id): return", "Role-Grant Methods # def rolegrant_get_page(self, user_id, tenant_id, marker, limit): \"\"\" Get one page", "rolegrant_get_page(self, user_id, tenant_id, marker, limit): \"\"\" Get one page of role grant list", "for role grants list \"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit) def list_global_roles_for_user(self, user_id):", "as api logger = logging.getLogger(__name__) # pylint: disable=C0103 class Manager(object): def __init__(self): self.driver", "applicable law or agreed to in writing, software # distributed under the License", "return self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit) def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit): \"\"\" Calculate", "def rolegrant_list_by_role(self, role_id): return 
self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id, role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id,", "self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id) def list_role_grants(self, role_id, user_id, tenant_id):", "marker, limit): \"\"\" Calculate pagination markers for role grants list \"\"\" return self.driver.rolegrant_get_page_markers(user_id,", "list \"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit) def list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id) def", "list_tenant_roles_for_user(self, user_id, tenant_id): return self.driver.list_tenant_roles_for_user(user_id, tenant_id) def rolegrant_list_by_role(self, role_id): return self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self,", "LLC. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "softtabstop=4 # # Copyright (C) 2011 OpenStack LLC. # # Licensed under the", "\"\"\" Role-Grant manager module \"\"\" import logging import keystone.backends.api as api logger =", "self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit) def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit): \"\"\" Calculate pagination", "rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id) def list_role_grants(self, role_id, user_id, tenant_id): return self.driver.list_role_grants(role_id, user_id, tenant_id)", "grant list \"\"\" return self.driver.rolegrant_get_page(user_id, tenant_id, marker, limit) def rolegrant_get_page_markers(self, user_id, tenant_id, marker,", "or agreed to in writing, software # distributed under the License is distributed", "Role-Grant manager module \"\"\" import logging import keystone.backends.api as api logger = logging.getLogger(__name__)", "or implied. 
# See the License for the specific language governing permissions and", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "pagination markers for role grants list \"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit) def", "self.driver.rolegrant_list_by_role(role_id) def rolegrant_get_by_ids(self, user_id, role_id, tenant_id): return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def rolegrant_delete(self, grant_id):", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "manager module \"\"\" import logging import keystone.backends.api as api logger = logging.getLogger(__name__) #", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "marker, limit): \"\"\" Get one page of role grant list \"\"\" return self.driver.rolegrant_get_page(user_id,", "# Role-Grant Methods # def rolegrant_get_page(self, user_id, tenant_id, marker, limit): \"\"\" Get one", "# pylint: disable=C0103 class Manager(object): def __init__(self): self.driver = api.ROLE # # Role-Grant", "with the License. 
# You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "# def rolegrant_get_page(self, user_id, tenant_id, marker, limit): \"\"\" Get one page of role", "module \"\"\" import logging import keystone.backends.api as api logger = logging.getLogger(__name__) # pylint:", "in writing, software # distributed under the License is distributed on an \"AS", "role grants list \"\"\" return self.driver.rolegrant_get_page_markers(user_id, tenant_id, marker, limit) def list_global_roles_for_user(self, user_id): return", "return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id) def rolegrant_delete(self, grant_id): return self.driver.rolegrant_delete(grant_id) def list_role_grants(self, role_id, user_id,", "tenant_id, marker, limit) def list_global_roles_for_user(self, user_id): return self.driver.list_global_roles_for_user(user_id) def list_tenant_roles_for_user(self, user_id, tenant_id): return", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "Carnegie Mellon University Database Group # import logging from website.models import DBMSCatalog, MetricCatalog", "__init__(self): super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): total_wait_time", "* average_wait total_wait_time += wait_time return total_wait_time / 1000000. class RawDBTime(BaseTargetObjective): def __init__(self):", "= float(value) elif name.endswith('wait_class'): # wait_class#: # 0: Other; 1: Application; 2: Configuration;", "I/O') or \\ any(n in name for n in extra_io_metrics): if not any(n", "super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): return observation_time target_objective_list", "improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): return observation_time target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj", "total_wait_time += float(value) elif 'time_waited_micro_fg' in name: wait_time = float(value) elif name.endswith('wait_class'): #", "'db cpu' in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: default_wait_time =", "in extra_io_metrics): if not any(n in name for n in not_io_metrics): if default_total_waits", "average_wait = default_wait_time / default_total_waits wait_time = total_waits * average_wait total_wait_time += wait_time", "target_objective.py # # Copyright (c) 2017-18, Carnegie Mellon University Database Group # import", "return total_wait_time / 1000000. class NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds',", "/ 1000000. 
class NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER)", "Group # import logging from website.models import DBMSCatalog, MetricCatalog from website.types import DBMSType", "logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def", "target objective is designed for Oracle v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values =", "{} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE,", "metrics['global.dba_hist_sys_time_model.db time'] / 1000000. return metrics['global.sys_time_model.db time'] / 1000000. class TransactionCounter(BaseTargetObjective): def __init__(self):", "name for n in not_io_metrics): if default_total_waits == 0: average_wait = 0 else:", "float(value) elif name.endswith('wait_class'): if value == 'Idle': wait_time = 0 elif value in", "name: continue if 'db cpu' in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in", "total_wait_time = 0. 
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value in", "short_unit='s', improvement=LESS_IS_BETTER) # This target objective is designed for Oracle v12.2.0.1.0 dbms =", "def __init__(self): super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def compute(self,", "improvement=MORE_IS_BETTER) def compute(self, metrics, observation_time): num_txns = sum(metrics[ctr] for ctr in ('global.sysstat.user commits',", "from website.types import DBMSType from ..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG =", "def __init__(self): super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) # This target objective", "n in not_io_metrics): if default_total_waits == 0: average_wait = 0 else: average_wait =", "if value == 'Idle': wait_time = 0 elif value in ('User I/O', 'System", "def __init__(self): super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time):", "= sum(metrics[ctr] for ctr in ('global.sysstat.user commits', 'global.sysstat.user rollbacks')) return num_txns class ElapsedTime(BaseTargetObjective):", "= tuple((DBMSType.ORACLE, target_obj) for target_obj in [ # pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user commits', 'global.sysstat.user", "name: wait_time = float(value) elif name.endswith('wait_class'): # wait_class#: # 0: Other; 1: Application;", "= DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default", "('global.sysstat.user commits', 'global.sysstat.user rollbacks')) return num_txns class ElapsedTime(BaseTargetObjective): 
def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time',", "cpu' in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: default_wait_time = float(self.default_values[name])", "5: Commit; 6: Idle; 7: Network; 8: User I/O; 9: System I/O if", "in not_io_metrics): if default_total_waits == 0: average_wait = 0 else: average_wait = default_wait_time", "MORE_IS_BETTER) LOG = logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds',", "def compute(self, metrics, observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 if has_dba_hist: return", "unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def compute(self, metrics, observation_time): num_txns = sum(metrics[ctr] for ctr in", "'Idle': wait_time = 0 total_wait_time += wait_time return total_wait_time / 1000000. class NormalizedDBTime(BaseTargetObjective):", "does not exist before cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name,", "def reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms):", "super().__init__(name='raw_db_time', pprint='Raw DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): has_dba_hist =", "metrics.items(): if has_dba_hist and 'dba_hist_' not in name: continue if 'db cpu' in", "cleaning if & only if it does not exist before cleaning has_dba_hist =", "Configuration; 3: Administrative; 4: Concurrency; # 5: Commit; 6: Idle; 7: Network; 8:", "..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG = logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def __init__(self):", "/ 1000000. 
class TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks', unit='transactions',", "time'] / 1000000. return metrics['global.sys_time_model.db time'] / 1000000. class TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter',", "float(self.default_values[name]) total_waits = float(value) elif name.endswith('wait_class'): if value == 'Idle': wait_time = 0", "not exist before cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value", "'time_waited_micro_fg' in name: default_wait_time = float(self.default_values[name]) wait_time = float(value) elif 'total_waits_fg' in name:", "observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time'] /", "= 0. # dba_hist db_time will be 0 after cleaning if & only", "MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {}", "'time_waited_micro_fg' in name: wait_time = float(value) elif name.endswith('wait_class'): # wait_class#: # 0: Other;", "wait_time = 0 total_wait_time += wait_time return total_wait_time / 1000000. 
class NormalizedDBTime(BaseTargetObjective): def", "'total_waits_fg' in name: default_total_waits = float(self.default_values[name]) total_waits = float(value) elif name.endswith('wait_class'): if value", "self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def reload_default_metrics(self): dbms", "BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG = logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time', pprint='Custom DB", "wait_time = float(value) elif name.endswith('wait_class'): # wait_class#: # 0: Other; 1: Application; 2:", "= [\"log file sync\"] not_io_metrics = [\"read by other session\"] total_wait_time = 0.", "3: Administrative; 4: Concurrency; # 5: Commit; 6: Idle; 7: Network; 8: User", "__init__(self): super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def compute(self, metrics,", "import logging from website.models import DBMSCatalog, MetricCatalog from website.types import DBMSType from ..base.target_objective", "= logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER)", "+= float(value) elif 'time_waited_micro_fg' in name: default_wait_time = float(self.default_values[name]) wait_time = float(value) elif", "observation_time): total_wait_time = 0. # dba_hist db_time will be 0 after cleaning if", "= metrics['global.dba_hist_sys_time_model.db time'] > 0 if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time'] / 1000000. 
return", "class NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) # This", "= float(value) elif 'total_waits_fg' in name: default_total_waits = float(self.default_values[name]) total_waits = float(value) elif", "= float(self.default_values[name]) total_waits = float(value) elif name.endswith('wait_class'): if value == 'Idle': wait_time =", "super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): total_wait_time =", "metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def compute(self, metrics, observation_time): extra_io_metrics = [\"log", "unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): return observation_time target_objective_list = tuple((DBMSType.ORACLE, target_obj)", "for n in extra_io_metrics): if not any(n in name for n in not_io_metrics):", "LOG = logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds', short_unit='s',", "1000000. 
class RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time', pprint='Raw DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def", "(c) 2017-18, Carnegie Mellon University Database Group # import logging from website.models import", "elif name.endswith('wait_class'): if value == 'Idle': wait_time = 0 elif value in ('User", "default_wait_time / default_total_waits wait_time = total_waits * average_wait total_wait_time += wait_time return total_wait_time", "= 0 else: average_wait = default_wait_time / default_total_waits wait_time = total_waits * average_wait", "value == 'Idle': wait_time = 0 total_wait_time += wait_time return total_wait_time / 1000000.", "ctr in ('global.sysstat.user commits', 'global.sysstat.user rollbacks')) return num_txns class ElapsedTime(BaseTargetObjective): def __init__(self): super().__init__(name='elapsed_time',", "only if it does not exist before cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] >", "in metrics.items(): if has_dba_hist and 'dba_hist_' not in name: continue if 'db cpu'", "'db cpu' in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: wait_time =", "metrics, observation_time): extra_io_metrics = [\"log file sync\"] not_io_metrics = [\"read by other session\"]", "1000000. class TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks', unit='transactions', short_unit='txn',", "time'] > 0 if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time'] / 1000000. return metrics['global.sys_time_model.db time']", "# pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user commits', 'global.sysstat.user rollbacks')), CustomDBTime(), NormalizedDBTime(), RawDBTime(), TransactionCounter(), ElapsedTime(), ])", "= 0 total_wait_time += wait_time return total_wait_time / 1000000. 
class NormalizedDBTime(BaseTargetObjective): def __init__(self):", "0 else: average_wait = default_wait_time / default_total_waits wait_time = total_waits * average_wait total_wait_time", "v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name]", "in ('global.sysstat.user commits', 'global.sysstat.user rollbacks')) return num_txns class ElapsedTime(BaseTargetObjective): def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed", "name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: default_wait_time = float(self.default_values[name]) wait_time =", "for name, value in metrics.items(): if has_dba_hist and 'dba_hist_' not in name: continue", "in name: wait_time = float(value) elif name.endswith('wait_class'): # wait_class#: # 0: Other; 1:", "average_wait = 0 else: average_wait = default_wait_time / default_total_waits wait_time = total_waits *", "unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0", "observation_time): num_txns = sum(metrics[ctr] for ctr in ('global.sysstat.user commits', 'global.sysstat.user rollbacks')) return num_txns", "in name for n in not_io_metrics): if default_total_waits == 0: average_wait = 0", "0. # dba_hist db_time will be 0 after cleaning if & only if", "total_wait_time += wait_time return total_wait_time / 1000000. 
class RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time', pprint='Raw", "extra_io_metrics = [\"log file sync\"] not_io_metrics = [\"read by other session\"] total_wait_time =", "name.endswith('wait_class'): # wait_class#: # 0: Other; 1: Application; 2: Configuration; 3: Administrative; 4:", "University Database Group # import logging from website.models import DBMSCatalog, MetricCatalog from website.types", "improvement=LESS_IS_BETTER) # This target objective is designed for Oracle v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE,", "from ..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG = logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def", "in ('User I/O', 'System I/O') or \\ any(n in name for n in", "target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj in [ # pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user commits',", "DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): total_wait_time = 0. #", "/ 1000000. 
class RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time', pprint='Raw DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER)", "float(value) elif 'time_waited_micro_fg' in name: wait_time = float(value) elif name.endswith('wait_class'): # wait_class#: #", "total_waits = float(value) elif name.endswith('wait_class'): if value == 'Idle': wait_time = 0 elif", "any(n in name for n in extra_io_metrics): if not any(n in name for", "short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 if", "MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def compute(self, metrics, observation_time): extra_io_metrics = [\"log file sync\"]", "name: default_total_waits = float(self.default_values[name]) total_waits = float(value) elif name.endswith('wait_class'): if value == 'Idle':", "/ default_total_waits wait_time = total_waits * average_wait total_wait_time += wait_time return total_wait_time /", "def compute(self, metrics, observation_time): num_txns = sum(metrics[ctr] for ctr in ('global.sysstat.user commits', 'global.sysstat.user", "= metric.default def compute(self, metrics, observation_time): extra_io_metrics = [\"log file sync\"] not_io_metrics =", "default_total_waits wait_time = total_waits * average_wait total_wait_time += wait_time return total_wait_time / 1000000.", "return metrics['global.sys_time_model.db time'] / 1000000. 
class TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter', pprint='Number of commits", "if default_total_waits == 0: average_wait = 0 else: average_wait = default_wait_time / default_total_waits", "def __init__(self): super().__init__(name='raw_db_time', pprint='Raw DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time):", "# import logging from website.models import DBMSCatalog, MetricCatalog from website.types import DBMSType from", "# # Copyright (c) 2017-18, Carnegie Mellon University Database Group # import logging", "in name: continue if 'db cpu' in name: total_wait_time += float(value) elif 'time_waited_micro_fg'", "wait_time return total_wait_time / 1000000. class RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time', pprint='Raw DB Time',", "exist before cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value in", "session\"] total_wait_time = 0. has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value", "CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics,", "elif name.endswith('wait_class'): # wait_class#: # 0: Other; 1: Application; 2: Configuration; 3: Administrative;", "= metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value in metrics.items(): if has_dba_hist and", "wait_time = float(value) elif 'total_waits_fg' in name: default_total_waits = float(self.default_values[name]) total_waits = float(value)", "1000000. return metrics['global.sys_time_model.db time'] / 1000000. 
class TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter', pprint='Number of", "name, value in metrics.items(): if has_dba_hist and 'dba_hist_' not in name: continue if", "not_io_metrics = [\"read by other session\"] total_wait_time = 0. has_dba_hist = metrics['global.dba_hist_sys_time_model.db time']", "value in metrics.items(): if has_dba_hist and 'dba_hist_' not in name: continue if 'db", "== 'Idle': wait_time = 0 total_wait_time += wait_time return total_wait_time / 1000000. class", "compute(self, metrics, observation_time): return observation_time target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj in [", "metrics['global.sys_time_model.db time'] / 1000000. class TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter', pprint='Number of commits and", "in [ # pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user commits', 'global.sysstat.user rollbacks')), CustomDBTime(), NormalizedDBTime(), RawDBTime(), TransactionCounter(),", "Oracle v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms):", "metrics, observation_time): return observation_time target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj in [ #", "8: User I/O; 9: System I/O if value == 'Idle': wait_time = 0", "class CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self,", "DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db time']", "2: Configuration; 3: Administrative; 4: Concurrency; # 5: Commit; 6: Idle; 7: Network;", "= {} for metric in 
MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def compute(self, metrics, observation_time):", "0 for name, value in metrics.items(): if has_dba_hist and 'dba_hist_' not in name:", "DBMSCatalog, MetricCatalog from website.types import DBMSType from ..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER)", "has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value in metrics.items(): if has_dba_hist", "cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value in metrics.items(): if", "I/O', 'System I/O') or \\ any(n in name for n in extra_io_metrics): if", "time'] > 0 for name, value in metrics.items(): if has_dba_hist and 'dba_hist_' not", "I/O; 9: System I/O if value == 'Idle': wait_time = 0 total_wait_time +=", "observation_time): return observation_time target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj in [ # pylint:", "def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): return", "# 0: Other; 1: Application; 2: Configuration; 3: Administrative; 4: Concurrency; # 5:", "wait_time = total_waits * average_wait total_wait_time += wait_time return total_wait_time / 1000000. class", "in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: wait_time = float(value) elif", "= [\"read by other session\"] total_wait_time = 0. has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] >", "name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: wait_time = float(value) elif name.endswith('wait_class'):", "1: Application; 2: Configuration; 3: Administrative; 4: Concurrency; # 5: Commit; 6: Idle;", "0. 
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value in metrics.items(): if", "short_unit='txn', improvement=MORE_IS_BETTER) def compute(self, metrics, observation_time): num_txns = sum(metrics[ctr] for ctr in ('global.sysstat.user", "for target_obj in [ # pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user commits', 'global.sysstat.user rollbacks')), CustomDBTime(), NormalizedDBTime(),", "+= wait_time return total_wait_time / 1000000. class NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time', pprint='Normalized DB", "any(n in name for n in not_io_metrics): if default_total_waits == 0: average_wait =", "has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time'] / 1000000. return metrics['global.sys_time_model.db time'] / 1000000. class TransactionCounter(BaseTargetObjective):", "# # OtterTune - target_objective.py # # Copyright (c) 2017-18, Carnegie Mellon University", "compute(self, metrics, observation_time): extra_io_metrics = [\"log file sync\"] not_io_metrics = [\"read by other", "[\"read by other session\"] total_wait_time = 0. 
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0", "super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def compute(self, metrics, observation_time):", "class ElapsedTime(BaseTargetObjective): def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics,", "rollbacks')) return num_txns class ElapsedTime(BaseTargetObjective): def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER)", "pprint='Normalized DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) # This target objective is designed for", "metrics['global.dba_hist_sys_time_model.db time'] > 0 if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time'] / 1000000. return metrics['global.sys_time_model.db", "before cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value in metrics.items():", "Application; 2: Configuration; 3: Administrative; 4: Concurrency; # 5: Commit; 6: Idle; 7:", "I/O if value == 'Idle': wait_time = 0 total_wait_time += wait_time return total_wait_time", "in name: default_wait_time = float(self.default_values[name]) wait_time = float(value) elif 'total_waits_fg' in name: default_total_waits", "compute(self, metrics, observation_time): total_wait_time = 0. # dba_hist db_time will be 0 after", "self.default_values[metric.name] = metric.default def reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for", "& only if it does not exist before cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db time']", "/ 1000000. return metrics['global.sys_time_model.db time'] / 1000000. 
class TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter', pprint='Number", "__init__(self): super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) # This target objective is", "default_wait_time = float(self.default_values[name]) wait_time = float(value) elif 'total_waits_fg' in name: default_total_waits = float(self.default_values[name])", "metric.default def reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric in", "unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) # This target objective is designed for Oracle v12.2.0.1.0 dbms", "default_total_waits = float(self.default_values[name]) total_waits = float(value) elif name.endswith('wait_class'): if value == 'Idle': wait_time", "logging from website.models import DBMSCatalog, MetricCatalog from website.types import DBMSType from ..base.target_objective import", "9: System I/O if value == 'Idle': wait_time = 0 total_wait_time += wait_time", "improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): total_wait_time = 0. # dba_hist db_time will be", "has_dba_hist and 'dba_hist_' not in name: continue if 'db cpu' in name: total_wait_time", "> 0 if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time'] / 1000000. 
return metrics['global.sys_time_model.db time'] /", "pprint='Number of commits and rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def compute(self, metrics, observation_time): num_txns", "# Copyright (c) 2017-18, Carnegie Mellon University Database Group # import logging from", "0 elif value in ('User I/O', 'System I/O') or \\ any(n in name", "self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def compute(self, metrics,", "version='172.16.31.10.0') self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def reload_default_metrics(self):", "dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] =", "tuple((DBMSType.ORACLE, target_obj) for target_obj in [ # pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user commits', 'global.sysstat.user rollbacks')),", "for n in not_io_metrics): if default_total_waits == 0: average_wait = 0 else: average_wait", "metrics, observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time']", "in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: default_wait_time = float(self.default_values[name]) wait_time", "wait_time return total_wait_time / 1000000. 
class NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time', pprint='Normalized DB Time',", "ElapsedTime(BaseTargetObjective): def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time):", "[\"log file sync\"] not_io_metrics = [\"read by other session\"] total_wait_time = 0. has_dba_hist", "System I/O if value == 'Idle': wait_time = 0 total_wait_time += wait_time return", "commits and rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def compute(self, metrics, observation_time): num_txns = sum(metrics[ctr]", "1000000. class NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) #", "from website.models import DBMSCatalog, MetricCatalog from website.types import DBMSType from ..base.target_objective import (BaseTargetObjective,", "0 if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time'] / 1000000. return metrics['global.sys_time_model.db time'] / 1000000.", "other session\"] total_wait_time = 0. 
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name,", "Administrative; 4: Concurrency; # 5: Commit; 6: Idle; 7: Network; 8: User I/O;", "is designed for Oracle v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for", "- target_objective.py # # Copyright (c) 2017-18, Carnegie Mellon University Database Group #", "observation_time target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj in [ # pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user", "float(value) elif 'time_waited_micro_fg' in name: default_wait_time = float(self.default_values[name]) wait_time = float(value) elif 'total_waits_fg'", "TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def", "4: Concurrency; # 5: Commit; 6: Idle; 7: Network; 8: User I/O; 9:", "in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values =", "metrics, observation_time): total_wait_time = 0. # dba_hist db_time will be 0 after cleaning", "total_wait_time += wait_time return total_wait_time / 1000000. 
class NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time', pprint='Normalized", "cpu' in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: wait_time = float(value)", "rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def compute(self, metrics, observation_time): num_txns = sum(metrics[ctr] for ctr", "db_time will be 0 after cleaning if & only if it does not", "\\ any(n in name for n in extra_io_metrics): if not any(n in name", "pprint='Raw DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db", "commits', 'global.sysstat.user rollbacks')) return num_txns class ElapsedTime(BaseTargetObjective): def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds',", "has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time'] / 1000000.", "unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): total_wait_time = 0. 
# dba_hist db_time", "return observation_time target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj in [ # pylint: disable=invalid-name", "not_io_metrics): if default_total_waits == 0: average_wait = 0 else: average_wait = default_wait_time /", "if value == 'Idle': wait_time = 0 total_wait_time += wait_time return total_wait_time /", "'Idle': wait_time = 0 elif value in ('User I/O', 'System I/O') or \\", "website.models import DBMSCatalog, MetricCatalog from website.types import DBMSType from ..base.target_objective import (BaseTargetObjective, BaseThroughput,", "DBMSType from ..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG = logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective):", "self.default_values[metric.name] = metric.default def compute(self, metrics, observation_time): extra_io_metrics = [\"log file sync\"] not_io_metrics", "7: Network; 8: User I/O; 9: System I/O if value == 'Idle': wait_time", "def compute(self, metrics, observation_time): return observation_time target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj in", "and rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def compute(self, metrics, observation_time): num_txns = sum(metrics[ctr] for", "class TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER)", "elif 'time_waited_micro_fg' in name: wait_time = float(value) elif name.endswith('wait_class'): # wait_class#: # 0:", "by other session\"] total_wait_time = 0. 
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for", "in name: default_total_waits = float(self.default_values[name]) total_waits = float(value) elif name.endswith('wait_class'): if value ==", "import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG = logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time',", "('User I/O', 'System I/O') or \\ any(n in name for n in extra_io_metrics):", "for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0')", "+= wait_time return total_wait_time / 1000000. class RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time', pprint='Raw DB", "for Oracle v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric in", "0: average_wait = 0 else: average_wait = default_wait_time / default_total_waits wait_time = total_waits", "time'] / 1000000. class TransactionCounter(BaseTargetObjective): def __init__(self): super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks',", "return metrics['global.dba_hist_sys_time_model.db time'] / 1000000. return metrics['global.sys_time_model.db time'] / 1000000. 
class TransactionCounter(BaseTargetObjective): def", "Database Group # import logging from website.models import DBMSCatalog, MetricCatalog from website.types import", "Copyright (c) 2017-18, Carnegie Mellon University Database Group # import logging from website.models", "float(value) elif name.endswith('wait_class'): # wait_class#: # 0: Other; 1: Application; 2: Configuration; 3:", "Concurrency; # 5: Commit; 6: Idle; 7: Network; 8: User I/O; 9: System", "Idle; 7: Network; 8: User I/O; 9: System I/O if value == 'Idle':", "sync\"] not_io_metrics = [\"read by other session\"] total_wait_time = 0. has_dba_hist = metrics['global.dba_hist_sys_time_model.db", "= total_waits * average_wait total_wait_time += wait_time return total_wait_time / 1000000. class RawDBTime(BaseTargetObjective):", "value == 'Idle': wait_time = 0 elif value in ('User I/O', 'System I/O')", "observation_time): extra_io_metrics = [\"log file sync\"] not_io_metrics = [\"read by other session\"] total_wait_time", "file sync\"] not_io_metrics = [\"read by other session\"] total_wait_time = 0. has_dba_hist =", "in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def compute(self, metrics, observation_time): extra_io_metrics = [\"log file", "not in name: continue if 'db cpu' in name: total_wait_time += float(value) elif", "sum(metrics[ctr] for ctr in ('global.sysstat.user commits', 'global.sysstat.user rollbacks')) return num_txns class ElapsedTime(BaseTargetObjective): def", "Other; 1: Application; 2: Configuration; 3: Administrative; 4: Concurrency; # 5: Commit; 6:", "or \\ any(n in name for n in extra_io_metrics): if not any(n in", "return total_wait_time / 1000000. 
class RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time', pprint='Raw DB Time', unit='seconds',", "wait_class#: # 0: Other; 1: Application; 2: Configuration; 3: Administrative; 4: Concurrency; #", "target_obj) for target_obj in [ # pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user commits', 'global.sysstat.user rollbacks')), CustomDBTime(),", "for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def compute(self, metrics, observation_time): extra_io_metrics =", "float(value) elif 'total_waits_fg' in name: default_total_waits = float(self.default_values[name]) total_waits = float(value) elif name.endswith('wait_class'):", "after cleaning if & only if it does not exist before cleaning has_dba_hist", "objective is designed for Oracle v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {}", "improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 if has_dba_hist:", "DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) # This target objective is designed for Oracle", "dba_hist db_time will be 0 after cleaning if & only if it does", "it does not exist before cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for", "total_wait_time / 1000000. class NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds', short_unit='s',", "= 0. 
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value in metrics.items():", "num_txns class ElapsedTime(BaseTargetObjective): def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self,", "Commit; 6: Idle; 7: Network; 8: User I/O; 9: System I/O if value", "Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): return observation_time target_objective_list = tuple((DBMSType.ORACLE,", "# 5: Commit; 6: Idle; 7: Network; 8: User I/O; 9: System I/O", "designed for Oracle v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric", "> 0 for name, value in metrics.items(): if has_dba_hist and 'dba_hist_' not in", "pprint='Elapsed Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): return observation_time target_objective_list =", "__init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): return observation_time", "compute(self, metrics, observation_time): num_txns = sum(metrics[ctr] for ctr in ('global.sysstat.user commits', 'global.sysstat.user rollbacks'))", "'System I/O') or \\ any(n in name for n in extra_io_metrics): if not", "= 0 elif value in ('User I/O', 'System I/O') or \\ any(n in", "name: default_wait_time = float(self.default_values[name]) wait_time = float(value) elif 'total_waits_fg' in name: default_total_waits =", "= {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def reload_default_metrics(self): dbms =", "if & only if it does not exist before cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db", "extra_io_metrics): if not any(n in name for n in 
not_io_metrics): if default_total_waits ==", "compute(self, metrics, observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0 if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db", "Network; 8: User I/O; 9: System I/O if value == 'Idle': wait_time =", "n in extra_io_metrics): if not any(n in name for n in not_io_metrics): if", "'global.sysstat.user rollbacks')) return num_txns class ElapsedTime(BaseTargetObjective): def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds', short_unit='s',", "RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time', pprint='Raw DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics,", "average_wait total_wait_time += wait_time return total_wait_time / 1000000. class RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time',", "continue if 'db cpu' in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name:", "== 'Idle': wait_time = 0 elif value in ('User I/O', 'System I/O') or", "2017-18, Carnegie Mellon University Database Group # import logging from website.models import DBMSCatalog,", "if not any(n in name for n in not_io_metrics): if default_total_waits == 0:", "elif value in ('User I/O', 'System I/O') or \\ any(n in name for", "def compute(self, metrics, observation_time): total_wait_time = 0. 
# dba_hist db_time will be 0", "# OtterTune - target_objective.py # # Copyright (c) 2017-18, Carnegie Mellon University Database", "elif 'total_waits_fg' in name: default_total_waits = float(self.default_values[name]) total_waits = float(value) elif name.endswith('wait_class'): if", "return num_txns class ElapsedTime(BaseTargetObjective): def __init__(self): super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def", "[ # pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user commits', 'global.sysstat.user rollbacks')), CustomDBTime(), NormalizedDBTime(), RawDBTime(), TransactionCounter(), ElapsedTime(),", "# wait_class#: # 0: Other; 1: Application; 2: Configuration; 3: Administrative; 4: Concurrency;", "for ctr in ('global.sysstat.user commits', 'global.sysstat.user rollbacks')) return num_txns class ElapsedTime(BaseTargetObjective): def __init__(self):", "website.types import DBMSType from ..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG = logging.getLogger(__name__)", "= default_wait_time / default_total_waits wait_time = total_waits * average_wait total_wait_time += wait_time return", "'dba_hist_' not in name: continue if 'db cpu' in name: total_wait_time += float(value)", "if has_dba_hist and 'dba_hist_' not in name: continue if 'db cpu' in name:", "name for n in extra_io_metrics): if not any(n in name for n in", "Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): total_wait_time = 0. # dba_hist", "short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): total_wait_time = 0. # dba_hist db_time will", "total_wait_time = 0. 
# dba_hist db_time will be 0 after cleaning if &", "User I/O; 9: System I/O if value == 'Idle': wait_time = 0 total_wait_time", "version='172.16.31.10.0') self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def compute(self,", "= float(value) elif name.endswith('wait_class'): if value == 'Idle': wait_time = 0 elif value", "class RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time', pprint='Raw DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self,", "import DBMSCatalog, MetricCatalog from website.types import DBMSType from ..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER,", "This target objective is designed for Oracle v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values", "0 after cleaning if & only if it does not exist before cleaning", "0: Other; 1: Application; 2: Configuration; 3: Administrative; 4: Concurrency; # 5: Commit;", "{} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def compute(self, metrics, observation_time): extra_io_metrics", "0 total_wait_time += wait_time return total_wait_time / 1000000. class NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time',", "and 'dba_hist_' not in name: continue if 'db cpu' in name: total_wait_time +=", "if 'db cpu' in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: default_wait_time", "wait_time = 0 elif value in ('User I/O', 'System I/O') or \\ any(n", "total_wait_time += float(value) elif 'time_waited_micro_fg' in name: default_wait_time = float(self.default_values[name]) wait_time = float(value)", "if has_dba_hist: return metrics['global.dba_hist_sys_time_model.db time'] / 1000000. return metrics['global.sys_time_model.db time'] / 1000000. 
class", "LESS_IS_BETTER, MORE_IS_BETTER) LOG = logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time', pprint='Custom DB Time',", "pprint='Custom DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): total_wait_time = 0.", "DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def", "num_txns = sum(metrics[ctr] for ctr in ('global.sysstat.user commits', 'global.sysstat.user rollbacks')) return num_txns class", "Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) # This target objective is designed for Oracle v12.2.0.1.0", "short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): return observation_time target_objective_list = tuple((DBMSType.ORACLE, target_obj) for", "import DBMSType from ..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG = logging.getLogger(__name__) class", "= float(self.default_values[name]) wait_time = float(value) elif 'total_waits_fg' in name: default_total_waits = float(self.default_values[name]) total_waits", "be 0 after cleaning if & only if it does not exist before", "target_obj in [ # pylint: disable=invalid-name BaseThroughput(transactions_counter=('global.sysstat.user commits', 'global.sysstat.user rollbacks')), CustomDBTime(), NormalizedDBTime(), RawDBTime(),", "super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) # This target objective is designed", "+= float(value) elif 'time_waited_micro_fg' in name: wait_time = float(value) elif name.endswith('wait_class'): # wait_class#:", "MetricCatalog from website.types import DBMSType from ..base.target_objective import (BaseTargetObjective, 
BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG", "if 'db cpu' in name: total_wait_time += float(value) elif 'time_waited_micro_fg' in name: wait_time", "metric.default def compute(self, metrics, observation_time): extra_io_metrics = [\"log file sync\"] not_io_metrics = [\"read", "Mellon University Database Group # import logging from website.models import DBMSCatalog, MetricCatalog from", "total_wait_time / 1000000. class RawDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='raw_db_time', pprint='Raw DB Time', unit='seconds', short_unit='s',", "in name for n in extra_io_metrics): if not any(n in name for n", "(BaseTargetObjective, BaseThroughput, LESS_IS_BETTER, MORE_IS_BETTER) LOG = logging.getLogger(__name__) class CustomDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='custom_db_time', pprint='Custom", "not any(n in name for n in not_io_metrics): if default_total_waits == 0: average_wait", "will be 0 after cleaning if & only if it does not exist", "reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name]", "def compute(self, metrics, observation_time): extra_io_metrics = [\"log file sync\"] not_io_metrics = [\"read by", "OtterTune - target_objective.py # # Copyright (c) 2017-18, Carnegie Mellon University Database Group", "else: average_wait = default_wait_time / default_total_waits wait_time = total_waits * average_wait total_wait_time +=", "__init__(self): super().__init__(name='raw_db_time', pprint='Raw DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): has_dba_hist", "name.endswith('wait_class'): if value == 'Idle': wait_time = 0 elif value in ('User I/O',", "value in ('User I/O', 'System I/O') or \\ any(n in name for n", "== 0: average_wait = 0 else: average_wait = default_wait_time / 
default_total_waits wait_time =", "Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) def compute(self, metrics, observation_time): has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] >", "metrics['global.dba_hist_sys_time_model.db time'] > 0 for name, value in metrics.items(): if has_dba_hist and 'dba_hist_'", "metrics, observation_time): num_txns = sum(metrics[ctr] for ctr in ('global.sysstat.user commits', 'global.sysstat.user rollbacks')) return", "float(self.default_values[name]) wait_time = float(value) elif 'total_waits_fg' in name: default_total_waits = float(self.default_values[name]) total_waits =", "NormalizedDBTime(BaseTargetObjective): def __init__(self): super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds', short_unit='s', improvement=LESS_IS_BETTER) # This target", "of commits and rollbacks', unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER) def compute(self, metrics, observation_time): num_txns =", "# dba_hist db_time will be 0 after cleaning if & only if it", "elif 'time_waited_micro_fg' in name: default_wait_time = float(self.default_values[name]) wait_time = float(value) elif 'total_waits_fg' in", "6: Idle; 7: Network; 8: User I/O; 9: System I/O if value ==", "= metric.default def reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values = {} for metric", "total_waits * average_wait total_wait_time += wait_time return total_wait_time / 1000000. 
class RawDBTime(BaseTargetObjective): def", "default_total_waits == 0: average_wait = 0 else: average_wait = default_wait_time / default_total_waits wait_time", "metric in MetricCatalog.objects.filter(dbms=dbms): self.default_values[metric.name] = metric.default def reload_default_metrics(self): dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0') self.default_values", "if it does not exist before cleaning has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0", "# This target objective is designed for Oracle v12.2.0.1.0 dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0')" ]
[ "be a previous valid character to match. \"\"\" class Solution1: def __match(self, s:", "p[j - 2] == s[i - 1]) and dp[i - 1][j]) return dp[m][n]", "0 <= p.length <= 30 s contains only lowercase English letters. p contains", "Input: s = \"aa\", p = \"a*\" Output: true Explanation: '*' means zero", "return self.__match(s + '\\0', p + '\\0') class Solution2: def isMatch(self, s: str,", "\"c*a*b\" Output: true Explanation: c can be repeated 0 times, a can be", "p[j - 1] == s[i - 1]) and dp[i - 1][j - 1]", "dp[1][1] = p[0] == '.' or p[0] == s[0] for i in range(1,", "str) -> bool: if p[0] == '\\0': return s[0] == '\\0' if s[0]", "in range(2, n + 1): if p[j - 1] != '*': dp[i][j] =", "s contains only lowercase English letters. p contains only lowercase English letters, '.',", "'*' means zero or more of the preceding element, 'a'. Therefore, by repeating", "(not partial). Example 1: Input: s = \"aa\", p = \"a\" Output: false", "* (n + 1) for _ in range(m + 1)] dp[0][0] = True", "'\\0') class Solution2: def isMatch(self, s: str, p: str) -> bool: m, n", "- 1] == '*': dp[0][j] = True else: break if m > 0:", "\"a\" does not match the entire string \"aa\". Example 2: Input: s =", "2] or ((p[j - 2] == '.' or p[j - 2] == s[i", "will be a previous valid character to match. \"\"\" class Solution1: def __match(self,", "1)] dp[0][0] = True for j in range(2, n + 1, 2): if", "p = \"c*a*b\" Output: true Explanation: c can be repeated 0 times, a", "repeated 1 time. Therefore, it matches \"aab\". Example 5: Input: s = \"mississippi\",", "does not match the entire string \"aa\". Example 2: Input: s = \"aa\",", "> 0: dp[1][1] = p[0] == '.' or p[0] == s[0] for i", "for '.' and '*' where: '.' Matches any single character. '*' Matches zero", "1] == '.' or p[j - 1] == s[i - 1]) and dp[i", "bool: return self.__match(s + '\\0', p + '\\0') class Solution2: def isMatch(self, s:", "or ((p[j - 2] == '.' or p[j - 2] == s[i -", "means zero or more of the preceding element, 'a'. 
Therefore, by repeating 'a'", "<= 30 s contains only lowercase English letters. p contains only lowercase English", "Explanation: \"a\" does not match the entire string \"aa\". Example 2: Input: s", "preceding element. The matching should cover the entire input string (not partial). Example", "dp[0][0] = True for j in range(2, n + 1, 2): if p[j", "s[i - 1]) and dp[i - 1][j - 1] else: dp[i][j] = dp[i][j", "if s[0] == '\\0': return p[1] == '*' and self.isMatch(s, p[2:]) if p[1]", "'*' Matches zero or more of the preceding element. The matching should cover", "'\\0': return p[1] == '*' and self.isMatch(s, p[2:]) if p[1] == '*': if", "s = \"ab\", p = \".*\" Output: true Explanation: \".*\" means \"zero or", "= [[False] * (n + 1) for _ in range(m + 1)] dp[0][0]", "str) -> bool: m, n = len(s), len(p) if n == 0: return", "20 0 <= p.length <= 30 s contains only lowercase English letters. p", "it matches \"aab\". Example 5: Input: s = \"mississippi\", p = \"mis*is*p*.\" Output:", "str) -> bool: return self.__match(s + '\\0', p + '\\0') class Solution2: def", "len(s), len(p) if n == 0: return m == 0 dp = [[False]", "should cover the entire input string (not partial). Example 1: Input: s =", "0 times, a can be repeated 1 time. Therefore, it matches \"aab\". Example", "repeated 0 times, a can be repeated 1 time. Therefore, it matches \"aab\".", "zero or more of the preceding element. The matching should cover the entire", "Example 1: Input: s = \"aa\", p = \"a\" Output: false Explanation: \"a\"", "(s) and a pattern (p), implement regular expression matching with support for '.'", "<= 20 0 <= p.length <= 30 s contains only lowercase English letters.", "(p[0] == '.' or p[0] == s[0]) and self.__match(s[1:], p[1:]) def isMatch(self, s:", "Solution2: def isMatch(self, s: str, p: str) -> bool: m, n = len(s),", "string \"aa\". 
Example 2: Input: s = \"aa\", p = \"a*\" Output: true", "+ 1, 2): if p[j - 1] == '*': dp[0][j] = True else:", "return m == 0 dp = [[False] * (n + 1) for _", "\".*\" Output: true Explanation: \".*\" means \"zero or more (*) of any character", "= \"a\" Output: false Explanation: \"a\" does not match the entire string \"aa\".", "p[2:]) if p[1] == '*': if p[0] == '.' or p[0] == s[0]:", "self.__match(s[1:], p) return self.__match(s, p[2:]) return (p[0] == '.' or p[0] == s[0])", "1] == '*': dp[0][j] = True else: break if m > 0: dp[1][1]", "element. The matching should cover the entire input string (not partial). Example 1:", "0 dp = [[False] * (n + 1) for _ in range(m +", "for each appearance of the character '*', there will be a previous valid", "to match. \"\"\" class Solution1: def __match(self, s: str, p: str) -> bool:", "[[False] * (n + 1) for _ in range(m + 1)] dp[0][0] =", "there will be a previous valid character to match. \"\"\" class Solution1: def", "Example 3: Input: s = \"ab\", p = \".*\" Output: true Explanation: \".*\"", "'*' where: '.' Matches any single character. '*' Matches zero or more of", "pattern (p), implement regular expression matching with support for '.' and '*' where:", "Input: s = \"ab\", p = \".*\" Output: true Explanation: \".*\" means \"zero", "or more (*) of any character (.)\". Example 4: Input: s = \"aab\",", "str, p: str) -> bool: return self.__match(s + '\\0', p + '\\0') class", "the entire input string (not partial). Example 1: Input: s = \"aa\", p", "== '\\0' if s[0] == '\\0': return p[1] == '*' and self.isMatch(s, p[2:])", "and dp[i - 1][j - 1] else: dp[i][j] = dp[i][j - 2] or", "\"aa\". Example 2: Input: s = \"aa\", p = \"a*\" Output: true Explanation:", "single character. '*' Matches zero or more of the preceding element. The matching", "input string (not partial). Example 1: Input: s = \"aa\", p = \"a\"", "of the preceding element. 
The matching should cover the entire input string (not", "+ 1): for j in range(2, n + 1): if p[j - 1]", "Example 5: Input: s = \"mississippi\", p = \"mis*is*p*.\" Output: false Constraints: 0", "- 1]) and dp[i - 1][j - 1] else: dp[i][j] = dp[i][j -", "self.__match(s + '\\0', p + '\\0') class Solution2: def isMatch(self, s: str, p:", "p = \"a*\" Output: true Explanation: '*' means zero or more of the", "entire input string (not partial). Example 1: Input: s = \"aa\", p =", "cover the entire input string (not partial). Example 1: Input: s = \"aa\",", "lowercase English letters. p contains only lowercase English letters, '.', and '*'. It", "else: break if m > 0: dp[1][1] = p[0] == '.' or p[0]", "more of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes", "string (not partial). Example 1: Input: s = \"aa\", p = \"a\" Output:", "p: str) -> bool: m, n = len(s), len(p) if n == 0:", "True else: break if m > 0: dp[1][1] = p[0] == '.' or", "bool: m, n = len(s), len(p) if n == 0: return m ==", "match. \"\"\" class Solution1: def __match(self, s: str, p: str) -> bool: if", "2): if p[j - 1] == '*': dp[0][j] = True else: break if", "== '.' or p[0] == s[0] for i in range(1, m + 1):", "= len(s), len(p) if n == 0: return m == 0 dp =", "= (p[j - 1] == '.' or p[j - 1] == s[i -", "c can be repeated 0 times, a can be repeated 1 time. Therefore,", "dp = [[False] * (n + 1) for _ in range(m + 1)]", "return (p[0] == '.' or p[0] == s[0]) and self.__match(s[1:], p[1:]) def isMatch(self,", "The matching should cover the entire input string (not partial). Example 1: Input:", "true Explanation: c can be repeated 0 times, a can be repeated 1", "in range(2, n + 1, 2): if p[j - 1] == '*': dp[0][j]", "not match the entire string \"aa\". Example 2: Input: s = \"aa\", p", "p[0] == '\\0': return s[0] == '\\0' if s[0] == '\\0': return p[1]", "letters, '.', and '*'. 
It is guaranteed for each appearance of the character", "\"aa\", p = \"a\" Output: false Explanation: \"a\" does not match the entire", "or p[0] == s[0]) and self.__match(s[1:], p[1:]) def isMatch(self, s: str, p: str)", "\"zero or more (*) of any character (.)\". Example 4: Input: s =", "input string (s) and a pattern (p), implement regular expression matching with support", "Matches zero or more of the preceding element. The matching should cover the", "'*': dp[i][j] = (p[j - 1] == '.' or p[j - 1] ==", "Explanation: '*' means zero or more of the preceding element, 'a'. Therefore, by", "and self.isMatch(s, p[2:]) if p[1] == '*': if p[0] == '.' or p[0]", "-> bool: if p[0] == '\\0': return s[0] == '\\0' if s[0] ==", "becomes \"aa\". Example 3: Input: s = \"ab\", p = \".*\" Output: true", "repeating 'a' once, it becomes \"aa\". Example 3: Input: s = \"ab\", p", "self.__match(s[1:], p[1:]) def isMatch(self, s: str, p: str) -> bool: return self.__match(s +", "p[0] == '.' or p[0] == s[0]: return self.__match(s, p[2:]) or self.__match(s[1:], p)", "True for j in range(2, n + 1, 2): if p[j - 1]", "\"\"\" Given an input string (s) and a pattern (p), implement regular expression", "regular expression matching with support for '.' and '*' where: '.' Matches any", "(.)\". Example 4: Input: s = \"aab\", p = \"c*a*b\" Output: true Explanation:", "= \"mississippi\", p = \"mis*is*p*.\" Output: false Constraints: 0 <= s.length <= 20", "'.' or p[0] == s[0]) and self.__match(s[1:], p[1:]) def isMatch(self, s: str, p:", "- 1] != '*': dp[i][j] = (p[j - 1] == '.' or p[j", "character '*', there will be a previous valid character to match. \"\"\" class", "in range(m + 1)] dp[0][0] = True for j in range(2, n +", "'*', there will be a previous valid character to match. \"\"\" class Solution1:", "(p[j - 1] == '.' or p[j - 1] == s[i - 1])", "dp[i][j - 2] or ((p[j - 2] == '.' or p[j - 2]", "isMatch(self, s: str, p: str) -> bool: m, n = len(s), len(p) if", "once, it becomes \"aa\". 
Example 3: Input: s = \"ab\", p = \".*\"", "and a pattern (p), implement regular expression matching with support for '.' and", "def __match(self, s: str, p: str) -> bool: if p[0] == '\\0': return", "or p[0] == s[0]: return self.__match(s, p[2:]) or self.__match(s[1:], p) return self.__match(s, p[2:])", "p[2:]) or self.__match(s[1:], p) return self.__match(s, p[2:]) return (p[0] == '.' or p[0]", "of the character '*', there will be a previous valid character to match.", "if n == 0: return m == 0 dp = [[False] * (n", "lowercase English letters, '.', and '*'. It is guaranteed for each appearance of", "if p[0] == '.' or p[0] == s[0]: return self.__match(s, p[2:]) or self.__match(s[1:],", "5: Input: s = \"mississippi\", p = \"mis*is*p*.\" Output: false Constraints: 0 <=", "3: Input: s = \"ab\", p = \".*\" Output: true Explanation: \".*\" means", "range(2, n + 1): if p[j - 1] != '*': dp[i][j] = (p[j", "'*' and self.isMatch(s, p[2:]) if p[1] == '*': if p[0] == '.' or", "1, 2): if p[j - 1] == '*': dp[0][j] = True else: break", "\"mis*is*p*.\" Output: false Constraints: 0 <= s.length <= 20 0 <= p.length <=", "dp[i][j] = dp[i][j - 2] or ((p[j - 2] == '.' or p[j", "0: return m == 0 dp = [[False] * (n + 1) for", "s[0] == '\\0': return p[1] == '*' and self.isMatch(s, p[2:]) if p[1] ==", "== '*' and self.isMatch(s, p[2:]) if p[1] == '*': if p[0] == '.'", "+ '\\0', p + '\\0') class Solution2: def isMatch(self, s: str, p: str)", "'.' or p[j - 2] == s[i - 1]) and dp[i - 1][j])", "p contains only lowercase English letters, '.', and '*'. 
It is guaranteed for", "= \"aab\", p = \"c*a*b\" Output: true Explanation: c can be repeated 0", "= \"aa\", p = \"a*\" Output: true Explanation: '*' means zero or more", "Example 4: Input: s = \"aab\", p = \"c*a*b\" Output: true Explanation: c", "string (s) and a pattern (p), implement regular expression matching with support for", "is guaranteed for each appearance of the character '*', there will be a", "p[1:]) def isMatch(self, s: str, p: str) -> bool: return self.__match(s + '\\0',", "= \"mis*is*p*.\" Output: false Constraints: 0 <= s.length <= 20 0 <= p.length", "s[0]) and self.__match(s[1:], p[1:]) def isMatch(self, s: str, p: str) -> bool: return", "(*) of any character (.)\". Example 4: Input: s = \"aab\", p =", "= \"aa\", p = \"a\" Output: false Explanation: \"a\" does not match the", "contains only lowercase English letters, '.', and '*'. It is guaranteed for each", "n = len(s), len(p) if n == 0: return m == 0 dp", "== '*': if p[0] == '.' or p[0] == s[0]: return self.__match(s, p[2:])", "str, p: str) -> bool: m, n = len(s), len(p) if n ==", "contains only lowercase English letters. p contains only lowercase English letters, '.', and", "i in range(1, m + 1): for j in range(2, n + 1):", "for i in range(1, m + 1): for j in range(2, n +", "false Explanation: \"a\" does not match the entire string \"aa\". Example 2: Input:", "implement regular expression matching with support for '.' and '*' where: '.' Matches", "\"a\" Output: false Explanation: \"a\" does not match the entire string \"aa\". Example", "p: str) -> bool: if p[0] == '\\0': return s[0] == '\\0' if", "can be repeated 0 times, a can be repeated 1 time. Therefore, it", "for _ in range(m + 1)] dp[0][0] = True for j in range(2,", "p[0] == '.' or p[0] == s[0] for i in range(1, m +", "'*': dp[0][j] = True else: break if m > 0: dp[1][1] = p[0]", "zero or more of the preceding element, 'a'. Therefore, by repeating 'a' once,", "preceding element, 'a'. Therefore, by repeating 'a' once, it becomes \"aa\". 
Example 3:", "j in range(2, n + 1): if p[j - 1] != '*': dp[i][j]", "self.__match(s, p[2:]) return (p[0] == '.' or p[0] == s[0]) and self.__match(s[1:], p[1:])", "where: '.' Matches any single character. '*' Matches zero or more of the", "(p), implement regular expression matching with support for '.' and '*' where: '.'", "for j in range(2, n + 1, 2): if p[j - 1] ==", "or p[0] == s[0] for i in range(1, m + 1): for j", "p[0] == s[0]: return self.__match(s, p[2:]) or self.__match(s[1:], p) return self.__match(s, p[2:]) return", "n + 1, 2): if p[j - 1] == '*': dp[0][j] = True", "Output: false Explanation: \"a\" does not match the entire string \"aa\". Example 2:", "= p[0] == '.' or p[0] == s[0] for i in range(1, m", "s: str, p: str) -> bool: m, n = len(s), len(p) if n", "= dp[i][j - 2] or ((p[j - 2] == '.' or p[j -", "__match(self, s: str, p: str) -> bool: if p[0] == '\\0': return s[0]", "== '.' or p[0] == s[0]) and self.__match(s[1:], p[1:]) def isMatch(self, s: str,", "dp[i][j] = (p[j - 1] == '.' or p[j - 1] == s[i", "= \"ab\", p = \".*\" Output: true Explanation: \".*\" means \"zero or more", "1 time. Therefore, it matches \"aab\". Example 5: Input: s = \"mississippi\", p", "p[1] == '*': if p[0] == '.' or p[0] == s[0]: return self.__match(s,", "'\\0', p + '\\0') class Solution2: def isMatch(self, s: str, p: str) ->", "return s[0] == '\\0' if s[0] == '\\0': return p[1] == '*' and", "match the entire string \"aa\". Example 2: Input: s = \"aa\", p =", "1] == s[i - 1]) and dp[i - 1][j - 1] else: dp[i][j]", "support for '.' and '*' where: '.' Matches any single character. '*' Matches", "+ '\\0') class Solution2: def isMatch(self, s: str, p: str) -> bool: m,", "+ 1) for _ in range(m + 1)] dp[0][0] = True for j", "\"aa\". Example 3: Input: s = \"ab\", p = \".*\" Output: true Explanation:", "more (*) of any character (.)\". Example 4: Input: s = \"aab\", p", "be repeated 0 times, a can be repeated 1 time. 
Therefore, it matches", "a pattern (p), implement regular expression matching with support for '.' and '*'", "dp[i - 1][j - 1] else: dp[i][j] = dp[i][j - 2] or ((p[j", "true Explanation: '*' means zero or more of the preceding element, 'a'. Therefore,", "m > 0: dp[1][1] = p[0] == '.' or p[0] == s[0] for", "Therefore, by repeating 'a' once, it becomes \"aa\". Example 3: Input: s =", "'.' or p[0] == s[0] for i in range(1, m + 1): for", "'*'. It is guaranteed for each appearance of the character '*', there will", "It is guaranteed for each appearance of the character '*', there will be", "(n + 1) for _ in range(m + 1)] dp[0][0] = True for", "_ in range(m + 1)] dp[0][0] = True for j in range(2, n", "only lowercase English letters. p contains only lowercase English letters, '.', and '*'.", "-> bool: m, n = len(s), len(p) if n == 0: return m", "1] != '*': dp[i][j] = (p[j - 1] == '.' or p[j -", "'\\0': return s[0] == '\\0' if s[0] == '\\0': return p[1] == '*'", "p[j - 1] != '*': dp[i][j] = (p[j - 1] == '.' or", "s: str, p: str) -> bool: return self.__match(s + '\\0', p + '\\0')", "time. Therefore, it matches \"aab\". Example 5: Input: s = \"mississippi\", p =", "'.' Matches any single character. '*' Matches zero or more of the preceding", "def isMatch(self, s: str, p: str) -> bool: return self.__match(s + '\\0', p", "else: dp[i][j] = dp[i][j - 2] or ((p[j - 2] == '.' or", "\"ab\", p = \".*\" Output: true Explanation: \".*\" means \"zero or more (*)", "or p[j - 1] == s[i - 1]) and dp[i - 1][j -", "== '\\0': return s[0] == '\\0' if s[0] == '\\0': return p[1] ==", "Constraints: 0 <= s.length <= 20 0 <= p.length <= 30 s contains", "= \"c*a*b\" Output: true Explanation: c can be repeated 0 times, a can", "s: str, p: str) -> bool: if p[0] == '\\0': return s[0] ==", "'a'. Therefore, by repeating 'a' once, it becomes \"aa\". 
Example 3: Input: s", "guaranteed for each appearance of the character '*', there will be a previous", "<= s.length <= 20 0 <= p.length <= 30 s contains only lowercase", "character to match. \"\"\" class Solution1: def __match(self, s: str, p: str) ->", "character. '*' Matches zero or more of the preceding element. The matching should", "<= p.length <= 30 s contains only lowercase English letters. p contains only", "English letters. p contains only lowercase English letters, '.', and '*'. It is", "range(2, n + 1, 2): if p[j - 1] == '*': dp[0][j] =", "expression matching with support for '.' and '*' where: '.' Matches any single", "class Solution2: def isMatch(self, s: str, p: str) -> bool: m, n =", "str, p: str) -> bool: if p[0] == '\\0': return s[0] == '\\0'", "break if m > 0: dp[1][1] = p[0] == '.' or p[0] ==", "the entire string \"aa\". Example 2: Input: s = \"aa\", p = \"a*\"", "each appearance of the character '*', there will be a previous valid character", "s[0]: return self.__match(s, p[2:]) or self.__match(s[1:], p) return self.__match(s, p[2:]) return (p[0] ==", "or more of the preceding element. The matching should cover the entire input", "2] == '.' or p[j - 2] == s[i - 1]) and dp[i", "character (.)\". Example 4: Input: s = \"aab\", p = \"c*a*b\" Output: true", "English letters, '.', and '*'. It is guaranteed for each appearance of the", "false Constraints: 0 <= s.length <= 20 0 <= p.length <= 30 s", "'.' or p[0] == s[0]: return self.__match(s, p[2:]) or self.__match(s[1:], p) return self.__match(s,", "if p[1] == '*': if p[0] == '.' or p[0] == s[0]: return", "== 0 dp = [[False] * (n + 1) for _ in range(m", "p = \"mis*is*p*.\" Output: false Constraints: 0 <= s.length <= 20 0 <=", "\"mississippi\", p = \"mis*is*p*.\" Output: false Constraints: 0 <= s.length <= 20 0", "== '.' or p[j - 2] == s[i - 1]) and dp[i -", "\"aab\", p = \"c*a*b\" Output: true Explanation: c can be repeated 0 times,", "0: dp[1][1] = p[0] == '.' 
or p[0] == s[0] for i in", "p: str) -> bool: return self.__match(s + '\\0', p + '\\0') class Solution2:", "s.length <= 20 0 <= p.length <= 30 s contains only lowercase English", "Input: s = \"aa\", p = \"a\" Output: false Explanation: \"a\" does not", "and '*' where: '.' Matches any single character. '*' Matches zero or more", "bool: if p[0] == '\\0': return s[0] == '\\0' if s[0] == '\\0':", "p[j - 1] == '*': dp[0][j] = True else: break if m >", "1): for j in range(2, n + 1): if p[j - 1] !=", "1] else: dp[i][j] = dp[i][j - 2] or ((p[j - 2] == '.'", "s[0] for i in range(1, m + 1): for j in range(2, n", "if p[j - 1] != '*': dp[i][j] = (p[j - 1] == '.'", "= \".*\" Output: true Explanation: \".*\" means \"zero or more (*) of any", "\"\"\" class Solution1: def __match(self, s: str, p: str) -> bool: if p[0]", "p) return self.__match(s, p[2:]) return (p[0] == '.' or p[0] == s[0]) and", "if p[j - 1] == '*': dp[0][j] = True else: break if m", "= \"a*\" Output: true Explanation: '*' means zero or more of the preceding", "any character (.)\". Example 4: Input: s = \"aab\", p = \"c*a*b\" Output:", "s = \"aa\", p = \"a\" Output: false Explanation: \"a\" does not match", "a can be repeated 1 time. Therefore, it matches \"aab\". Example 5: Input:", "and self.__match(s[1:], p[1:]) def isMatch(self, s: str, p: str) -> bool: return self.__match(s", "the character '*', there will be a previous valid character to match. \"\"\"", "Therefore, it matches \"aab\". Example 5: Input: s = \"mississippi\", p = \"mis*is*p*.\"", "'a' once, it becomes \"aa\". Example 3: Input: s = \"ab\", p =", "Solution1: def __match(self, s: str, p: str) -> bool: if p[0] == '\\0':", "== '*': dp[0][j] = True else: break if m > 0: dp[1][1] =", "Output: false Constraints: 0 <= s.length <= 20 0 <= p.length <= 30", "Example 2: Input: s = \"aa\", p = \"a*\" Output: true Explanation: '*'", "element, 'a'. Therefore, by repeating 'a' once, it becomes \"aa\". Example 3: Input:", "== '.' 
or p[j - 1] == s[i - 1]) and dp[i -", "partial). Example 1: Input: s = \"aa\", p = \"a\" Output: false Explanation:", "Given an input string (s) and a pattern (p), implement regular expression matching", "of any character (.)\". Example 4: Input: s = \"aab\", p = \"c*a*b\"", "entire string \"aa\". Example 2: Input: s = \"aa\", p = \"a*\" Output:", "30 s contains only lowercase English letters. p contains only lowercase English letters,", "p[1] == '*' and self.isMatch(s, p[2:]) if p[1] == '*': if p[0] ==", "p[2:]) return (p[0] == '.' or p[0] == s[0]) and self.__match(s[1:], p[1:]) def", "def isMatch(self, s: str, p: str) -> bool: m, n = len(s), len(p)", "!= '*': dp[i][j] = (p[j - 1] == '.' or p[j - 1]", "a previous valid character to match. \"\"\" class Solution1: def __match(self, s: str,", "times, a can be repeated 1 time. Therefore, it matches \"aab\". Example 5:", "Explanation: \".*\" means \"zero or more (*) of any character (.)\". Example 4:", "with support for '.' and '*' where: '.' Matches any single character. '*'", "the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes \"aa\". Example", "p = \".*\" Output: true Explanation: \".*\" means \"zero or more (*) of", "== s[0]) and self.__match(s[1:], p[1:]) def isMatch(self, s: str, p: str) -> bool:", "= True else: break if m > 0: dp[1][1] = p[0] == '.'", "class Solution1: def __match(self, s: str, p: str) -> bool: if p[0] ==", "range(m + 1)] dp[0][0] = True for j in range(2, n + 1,", "s = \"aab\", p = \"c*a*b\" Output: true Explanation: c can be repeated", "previous valid character to match. \"\"\" class Solution1: def __match(self, s: str, p:", "if p[0] == '\\0': return s[0] == '\\0' if s[0] == '\\0': return", "an input string (s) and a pattern (p), implement regular expression matching with", "of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes \"aa\".", "p + '\\0') class Solution2: def isMatch(self, s: str, p: str) -> bool:", "== '.' 
or p[0] == s[0]: return self.__match(s, p[2:]) or self.__match(s[1:], p) return", "any single character. '*' Matches zero or more of the preceding element. The", "or more of the preceding element, 'a'. Therefore, by repeating 'a' once, it", "be repeated 1 time. Therefore, it matches \"aab\". Example 5: Input: s =", "can be repeated 1 time. Therefore, it matches \"aab\". Example 5: Input: s", "and '*'. It is guaranteed for each appearance of the character '*', there", "== s[0]: return self.__match(s, p[2:]) or self.__match(s[1:], p) return self.__match(s, p[2:]) return (p[0]", "== 0: return m == 0 dp = [[False] * (n + 1)", "valid character to match. \"\"\" class Solution1: def __match(self, s: str, p: str)", "j in range(2, n + 1, 2): if p[j - 1] == '*':", "only lowercase English letters, '.', and '*'. It is guaranteed for each appearance", "== s[i - 1]) and dp[i - 1][j - 1] else: dp[i][j] =", "the preceding element. The matching should cover the entire input string (not partial).", "Explanation: c can be repeated 0 times, a can be repeated 1 time.", "in range(1, m + 1): for j in range(2, n + 1): if", "Output: true Explanation: \".*\" means \"zero or more (*) of any character (.)\".", "matching with support for '.' and '*' where: '.' Matches any single character.", "matches \"aab\". Example 5: Input: s = \"mississippi\", p = \"mis*is*p*.\" Output: false", "p.length <= 30 s contains only lowercase English letters. p contains only lowercase", "p = \"a\" Output: false Explanation: \"a\" does not match the entire string", "((p[j - 2] == '.' or p[j - 2] == s[i - 1])", "if m > 0: dp[1][1] = p[0] == '.' or p[0] == s[0]", "'\\0' if s[0] == '\\0': return p[1] == '*' and self.isMatch(s, p[2:]) if", "return self.__match(s, p[2:]) or self.__match(s[1:], p) return self.__match(s, p[2:]) return (p[0] == '.'", "- 2] == '.' or p[j - 2] == s[i - 1]) and", "- 1] == '.' or p[j - 1] == s[i - 1]) and", "means \"zero or more (*) of any character (.)\". 
Example 4: Input: s", "Input: s = \"aab\", p = \"c*a*b\" Output: true Explanation: c can be", "2: Input: s = \"aa\", p = \"a*\" Output: true Explanation: '*' means", "s = \"aa\", p = \"a*\" Output: true Explanation: '*' means zero or", "appearance of the character '*', there will be a previous valid character to", "- 1] == s[i - 1]) and dp[i - 1][j - 1] else:", "s[0] == '\\0' if s[0] == '\\0': return p[1] == '*' and self.isMatch(s,", "== '\\0': return p[1] == '*' and self.isMatch(s, p[2:]) if p[1] == '*':", "1) for _ in range(m + 1)] dp[0][0] = True for j in", "matching should cover the entire input string (not partial). Example 1: Input: s", "1]) and dp[i - 1][j - 1] else: dp[i][j] = dp[i][j - 2]", "true Explanation: \".*\" means \"zero or more (*) of any character (.)\". Example", "\".*\" means \"zero or more (*) of any character (.)\". Example 4: Input:", "self.isMatch(s, p[2:]) if p[1] == '*': if p[0] == '.' or p[0] ==", "or p[j - 2] == s[i - 1]) and dp[i - 1][j]) return", "== s[0] for i in range(1, m + 1): for j in range(2,", "1][j - 1] else: dp[i][j] = dp[i][j - 2] or ((p[j - 2]", "+ 1)] dp[0][0] = True for j in range(2, n + 1, 2):", "+ 1): if p[j - 1] != '*': dp[i][j] = (p[j - 1]", "or self.__match(s[1:], p) return self.__match(s, p[2:]) return (p[0] == '.' or p[0] ==", "Output: true Explanation: '*' means zero or more of the preceding element, 'a'.", "= True for j in range(2, n + 1, 2): if p[j -", "'.', and '*'. It is guaranteed for each appearance of the character '*',", "it becomes \"aa\". Example 3: Input: s = \"ab\", p = \".*\" Output:", "return self.__match(s, p[2:]) return (p[0] == '.' or p[0] == s[0]) and self.__match(s[1:],", "range(1, m + 1): for j in range(2, n + 1): if p[j", "p[0] == s[0]) and self.__match(s[1:], p[1:]) def isMatch(self, s: str, p: str) ->", "Input: s = \"mississippi\", p = \"mis*is*p*.\" Output: false Constraints: 0 <= s.length", "1): if p[j - 1] != '*': dp[i][j] = (p[j - 1] ==", "by repeating 'a' once, it becomes \"aa\". 
Example 3: Input: s = \"ab\",", "- 1][j - 1] else: dp[i][j] = dp[i][j - 2] or ((p[j -", "dp[0][j] = True else: break if m > 0: dp[1][1] = p[0] ==", "Matches any single character. '*' Matches zero or more of the preceding element.", "n + 1): if p[j - 1] != '*': dp[i][j] = (p[j -", "'*': if p[0] == '.' or p[0] == s[0]: return self.__match(s, p[2:]) or", "isMatch(self, s: str, p: str) -> bool: return self.__match(s + '\\0', p +", "-> bool: return self.__match(s + '\\0', p + '\\0') class Solution2: def isMatch(self,", "letters. p contains only lowercase English letters, '.', and '*'. It is guaranteed", "'.' and '*' where: '.' Matches any single character. '*' Matches zero or", "4: Input: s = \"aab\", p = \"c*a*b\" Output: true Explanation: c can", "- 2] or ((p[j - 2] == '.' or p[j - 2] ==", "m + 1): for j in range(2, n + 1): if p[j -", "for j in range(2, n + 1): if p[j - 1] != '*':", "- 1] else: dp[i][j] = dp[i][j - 2] or ((p[j - 2] ==", "\"aab\". Example 5: Input: s = \"mississippi\", p = \"mis*is*p*.\" Output: false Constraints:", "'.' or p[j - 1] == s[i - 1]) and dp[i - 1][j", "\"a*\" Output: true Explanation: '*' means zero or more of the preceding element,", "self.__match(s, p[2:]) or self.__match(s[1:], p) return self.__match(s, p[2:]) return (p[0] == '.' or", "s = \"mississippi\", p = \"mis*is*p*.\" Output: false Constraints: 0 <= s.length <=", "m == 0 dp = [[False] * (n + 1) for _ in", "Output: true Explanation: c can be repeated 0 times, a can be repeated", "len(p) if n == 0: return m == 0 dp = [[False] *", "0 <= s.length <= 20 0 <= p.length <= 30 s contains only", "return p[1] == '*' and self.isMatch(s, p[2:]) if p[1] == '*': if p[0]", "m, n = len(s), len(p) if n == 0: return m == 0", "more of the preceding element. 
The matching should cover the entire input string", "\"aa\", p = \"a*\" Output: true Explanation: '*' means zero or more of", "p[0] == s[0] for i in range(1, m + 1): for j in", "n == 0: return m == 0 dp = [[False] * (n +", "1: Input: s = \"aa\", p = \"a\" Output: false Explanation: \"a\" does" ]
[ "rc # 392secs with loc,434 secs with the regression. R time, 10secs for", "else 1 << 2 * kmer; def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k): nonrevk", "!= 1: seq = numtonuc[copy&mask] + seq copy >>= 2 if copy ==", "not find the append-left on the input sequence\") return 0 return rev def", "for i in range(0,len(seqtbl)): #22s for 3000 mask = (4**gapmer)-1 cpy = int(seqtbl[i])", "int(seqbin) while copy != 1: rev <<= 2 rev |= complement[copy&mask] copy >>=", "in range(0,len(seq)): if i in gaps: continue binrep <<= 2 binrep |= nucleotides[seq[i]]", "rev <<= 2 rev |= complement[copy&mask] copy >>= 2 if copy == 0:", "insert_pos(seqint,base,pos): # pos is position from the right return ((seqint << 2) &", "secs with the regression. R time, 10secs for allocation, 3.97mins for linreg #", "the first or the last number representation def seqpos(kmer,last): return 1 << (1", "def revcompstr(seq): rev = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} return", "2) | (seqint & 2**pos-1) & ~(3 << (pos*2)) | (nucleotides[base] << pos*2)", "Output: oligonucleotide count with reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the gapmodel, our", "10secs for allocation, 3.97mins for linreg # with 'at', only 23secs! -- 254secs", "(4**gapmer)-1: # gap calculation here cur = cpy & mask right = cur", "loc,434 secs with the regression. R time, 10secs for allocation, 3.97mins for linreg", "cur = cpy & mask right = cur & ((4**rightseparator)-1) left = (cur", "the gapmodel, our model become gapsize + kmer gapmer = kmer+gapsize # separator,", "function already counts without its reverse complement, # i.e. 
oligfreq + reverse merge", "# with the gapmodel, our model become gapsize + kmer gapmer = kmer+gapsize", "(range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k): nonrevk = list() for i in range(seqpos(k,False),seqpos(k,True)): if i <=", "original R code # Input: panda list and kmer length # Output: oligonucleotide", "'G': 'C', 'T': 'A'} return \"\".join([rev[base] for base in reversed(seq)]) def insert_pos(seqint,base,pos): #", "reverse complement, # i.e. oligfreq + reverse merge in the original R code", "needs append 1 to the left binrep = 1 gaps = range(gappos,gappos+gapsize) for", "its reverse complement, # i.e. oligfreq + reverse merge in the original R", "to the left binrep = 1 gaps = range(gappos,gappos+gapsize) for i in range(0,len(seq)):", "this project always needs append 1 to the left binrep = 1 gaps", "| (nucleotides[base] << pos*2) # this function already counts without its reverse complement,", "our model become gapsize + kmer gapmer = kmer+gapsize # separator, since this", "seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k): nonrevk = list() for i in range(seqpos(k,False),seqpos(k,True)): if", "nucleotides = {'A':0,'C':1,'G':2,'T':3} numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1} def window(fseq, window_size): for", "2) & ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) | (nucleotides[base] << pos*2)) #return (seqint", "project always needs append 1 to the left binrep = 1 gaps =", "# 392secs with loc,434 secs with the regression. 
R time, 10secs for allocation,", "oligfreq + reverse merge in the original R code # Input: panda list", "window_size + 1): yield fseq[i:i+window_size] # return the first or the last number", "binrep <<= 2 binrep |= nucleotides[seq[i]] return binrep def revcomp(seqbin): rev = 1", "seqint seq = \"\" mask = 3 copy = int(seqint) # prevent changing", "removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the gapmodel, our model become gapsize + kmer", "are already reverse-deleted # all sequences are represented in binary nucleotides = {'A':0,'C':1,'G':2,'T':3}", "input sequence\") return 0 return seq def seqtoi(seq,gappos=0,gapsize=0): # due to various seqlengths,", "leftseparator = rightseparator+gapsize olig_df = {k: [0] * len(seqtbl) for k in nonrev_list}", "!= 1: rev <<= 2 rev |= complement[copy&mask] copy >>= 2 if copy", "{0:3,3:0,1:2,2:1} def window(fseq, window_size): for i in range(len(fseq) - window_size + 1): yield", "kmer-gappos leftseparator = rightseparator+gapsize olig_df = {k: [0] * len(seqtbl) for k in", "regression. 
R time, 10secs for allocation, 3.97mins for linreg # with 'at', only", "complement[copy&mask] copy >>= 2 if copy == 0: print(\"Could not find the append-left", "return rev def revcompstr(seq): rev = {'A': 'T', 'C': 'G', 'G': 'C', 'T':", "0 return seq def seqtoi(seq,gappos=0,gapsize=0): # due to various seqlengths, this project always", "the append-left on the input sequence\") return 0 return seq def seqtoi(seq,gappos=0,gapsize=0): #", "return the first or the last number representation def seqpos(kmer,last): return 1 <<", "| (nucleotides[base] << pos*2)) #return (seqint << 2) | (seqint & 2**pos-1) &", "'A'} return \"\".join([rev[base] for base in reversed(seq)]) def insert_pos(seqint,base,pos): # pos is position", "'T': 'A'} return \"\".join([rev[base] for base in reversed(seq)]) def insert_pos(seqint,base,pos): # pos is", "1 mask = 3 copy = int(seqbin) while copy != 1: rev <<=", "((seqint << 2) & ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) | (nucleotides[base] << pos*2))", "* len(seqtbl) for k in nonrev_list} # use dictionary first to avoid slow", "<= revcomp(i): nonrevk.append(i) return nonrevk def itoseq(seqint): if type(seqint) is not int: return", "3 copy = int(seqint) # prevent changing the original value while(copy) != 1:", "the original R code # Input: panda list and kmer length # Output:", "revcomp(i): nonrevk.append(i) return nonrevk def itoseq(seqint): if type(seqint) is not int: return seqint", "nonrevk.append(i) return nonrevk def itoseq(seqint): if type(seqint) is not int: return seqint seq", "= range(gappos,gappos+gapsize) for i in range(0,len(seq)): if i in gaps: continue binrep <<=", "is binary, the number is counted from the right rightseparator = kmer-gappos leftseparator", "base in reversed(seq)]) def insert_pos(seqint,base,pos): # pos is position from the right return", "R code # Input: panda list and kmer length # Output: oligonucleotide count", "* kmer; def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) 
def gen_nonreversed_kmer(k): nonrevk = list() for i", "1 gaps = range(gappos,gappos+gapsize) for i in range(0,len(seq)): if i in gaps: continue", "Input: panda list and kmer length # Output: oligonucleotide count with reverse removed", "<< (1 + 2 * kmer) if last else 1 << 2 *", "> (4**gapmer)-1: # gap calculation here cur = cpy & mask right =", "pos is position from the right return ((seqint << 2) & ~(2**(2*pos+2)-1)) |", "left binrep = 1 gaps = range(gappos,gappos+gapsize) for i in range(0,len(seq)): if i", "in gaps: continue binrep <<= 2 binrep |= nucleotides[seq[i]] return binrep def revcomp(seqbin):", "counted from the right rightseparator = kmer-gappos leftseparator = rightseparator+gapsize olig_df = {k:", "seqtoi(seq,gappos=0,gapsize=0): # due to various seqlengths, this project always needs append 1 to", "from panda data frame for i in range(0,len(seqtbl)): #22s for 3000 mask =", "1 << (1 + 2 * kmer) if last else 1 << 2", "seq copy >>= 2 if copy == 0: print(\"Could not find the append-left", "append 1 to the left binrep = 1 gaps = range(gappos,gappos+gapsize) for i", "without its reverse complement, # i.e. 
oligfreq + reverse merge in the original", "def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k): nonrevk = list() for i in range(seqpos(k,False),seqpos(k,True)):", "append-left on the input sequence\") return 0 return rev def revcompstr(seq): rev =", "\"\".join([rev[base] for base in reversed(seq)]) def insert_pos(seqint,base,pos): # pos is position from the", "gen_nonreversed_kmer(k): nonrevk = list() for i in range(seqpos(k,False),seqpos(k,True)): if i <= revcomp(i): nonrevk.append(i)", "append 1 rc = revcomp(r) if r > rc: r = rc #", "complement = {0:3,3:0,1:2,2:1} def window(fseq, window_size): for i in range(len(fseq) - window_size +", "gapmodel, our model become gapsize + kmer gapmer = kmer+gapsize # separator, since", "- window_size + 1): yield fseq[i:i+window_size] # return the first or the last", "= int(seqbin) while copy != 1: rev <<= 2 rev |= complement[copy&mask] copy", "the right return ((seqint << 2) & ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) |", "complement, # i.e. 
oligfreq + reverse merge in the original R code #", "from the right rightseparator = kmer-gappos leftseparator = rightseparator+gapsize olig_df = {k: [0]", "rev = 1 mask = 3 copy = int(seqbin) while copy != 1:", "return 1 << (1 + 2 * kmer) if last else 1 <<", "def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the gapmodel, our model become gapsize + kmer gapmer", "if i <= revcomp(i): nonrevk.append(i) return nonrevk def itoseq(seqint): if type(seqint) is not", "# all sequences are represented in binary nucleotides = {'A':0,'C':1,'G':2,'T':3} numtonuc = {0:'A',1:'C',2:'G',3:'T'}", "model become gapsize + kmer gapmer = kmer+gapsize # separator, since this is", "revcomp(r) if r > rc: r = rc # 392secs with loc,434 secs", "# separator, since this is binary, the number is counted from the right", "in range(seqpos(k,False),seqpos(k,True)): if i <= revcomp(i): nonrevk.append(i) return nonrevk def itoseq(seqint): if type(seqint)", "print(\"Could not find the append-left on the input sequence\") return 0 return seq", "range(len(fseq) - window_size + 1): yield fseq[i:i+window_size] # return the first or the", "return seqint seq = \"\" mask = 3 copy = int(seqint) # prevent", "int: return seqint seq = \"\" mask = 3 copy = int(seqint) #", "seqlengths, this project always needs append 1 to the left binrep = 1", "<<= 2 rev |= complement[copy&mask] copy >>= 2 if copy == 0: print(\"Could", "* kmer) if last else 1 << 2 * kmer; def seq_permutation(seqlen): return", "= {0:3,3:0,1:2,2:1} def window(fseq, window_size): for i in range(len(fseq) - window_size + 1):", "calculation here cur = cpy & mask right = cur & ((4**rightseparator)-1) left", "itoseq(seqint): if type(seqint) is not int: return seqint seq = \"\" mask =", "left = (cur >> 2*leftseparator) << 2*rightseparator gappedseqint = left | right r", "reversed(seq)]) def insert_pos(seqint,base,pos): # pos is position from the right return ((seqint <<", "gap calculation here cur = cpy & mask right = cur & 
((4**rightseparator)-1)", "sequence\") return 0 return rev def revcompstr(seq): rev = {'A': 'T', 'C': 'G',", "= kmer-gappos leftseparator = rightseparator+gapsize olig_df = {k: [0] * len(seqtbl) for k", "binary nucleotides = {'A':0,'C':1,'G':2,'T':3} numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1} def window(fseq, window_size):", "for i in range(0,len(seq)): if i in gaps: continue binrep <<= 2 binrep", "fseq[i:i+window_size] # return the first or the last number representation def seqpos(kmer,last): return", "become gapsize + kmer gapmer = kmer+gapsize # separator, since this is binary,", "r > rc: r = rc # 392secs with loc,434 secs with the", "rc = revcomp(r) if r > rc: r = rc # 392secs with", "~(3 << (pos*2)) | (nucleotides[base] << pos*2) # this function already counts without", "~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) | (nucleotides[base] << pos*2)) #return (seqint << 2)", "2 binrep |= nucleotides[seq[i]] return binrep def revcomp(seqbin): rev = 1 mask =", "as np # all permutations are already reverse-deleted # all sequences are represented", "for k in nonrev_list} # use dictionary first to avoid slow indexing from", "& 2**(2*pos)-1) | (nucleotides[base] << pos*2)) #return (seqint << 2) | (seqint &", "range(seqpos(k,False),seqpos(k,True)): if i <= revcomp(i): nonrevk.append(i) return nonrevk def itoseq(seqint): if type(seqint) is", "for i in range(seqpos(k,False),seqpos(k,True)): if i <= revcomp(i): nonrevk.append(i) return nonrevk def itoseq(seqint):", "1 << 2 * kmer; def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k): nonrevk =", "gapsize + kmer gapmer = kmer+gapsize # separator, since this is binary, the", "return 0 return rev def revcompstr(seq): rev = {'A': 'T', 'C': 'G', 'G':", "already reverse-deleted # all sequences are represented in binary nucleotides = {'A':0,'C':1,'G':2,'T':3} numtonuc", "sequences are represented in binary nucleotides = {'A':0,'C':1,'G':2,'T':3} 
numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement =", "indexing from panda data frame for i in range(0,len(seqtbl)): #22s for 3000 mask", "right return ((seqint << 2) & ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) | (nucleotides[base]", "<< 2) | (seqint & 2**pos-1) & ~(3 << (pos*2)) | (nucleotides[base] <<", "olig_df = {k: [0] * len(seqtbl) for k in nonrev_list} # use dictionary", "rightseparator = kmer-gappos leftseparator = rightseparator+gapsize olig_df = {k: [0] * len(seqtbl) for", "((seqint & 2**(2*pos)-1) | (nucleotides[base] << pos*2)) #return (seqint << 2) | (seqint", "(1 + 2 * kmer) if last else 1 << 2 * kmer;", "# gap calculation here cur = cpy & mask right = cur &", "range(0,len(seqtbl)): #22s for 3000 mask = (4**gapmer)-1 cpy = int(seqtbl[i]) while cpy >", "(seqint << 2) | (seqint & 2**pos-1) & ~(3 << (pos*2)) | (nucleotides[base]", "pd import numpy as np # all permutations are already reverse-deleted # all", "-- 254secs total for 6mer olig_df[r][i] += 1 cpy >>= 2 return pd.DataFrame(olig_df)", "rev def revcompstr(seq): rev = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}", "since this is binary, the number is counted from the right rightseparator =", "here cur = cpy & mask right = cur & ((4**rightseparator)-1) left =", "rightseparator+gapsize olig_df = {k: [0] * len(seqtbl) for k in nonrev_list} # use", "from the right return ((seqint << 2) & ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1)", "R time, 10secs for allocation, 3.97mins for linreg # with 'at', only 23secs!", "kmer) if last else 1 << 2 * kmer; def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True)))", "on the input sequence\") return 0 return rev def revcompstr(seq): rev = {'A':", "def seqtoi(seq,gappos=0,gapsize=0): # due to various seqlengths, this project always needs append 1", "= (cur >> 2*leftseparator) << 2*rightseparator gappedseqint = left | right r =", "if type(seqint) is not int: return seqint seq = \"\" mask = 3", "copy = int(seqint) # prevent changing the 
original value while(copy) != 1: seq", "(nucleotides[base] << pos*2) # this function already counts without its reverse complement, #", "rev |= complement[copy&mask] copy >>= 2 if copy == 0: print(\"Could not find", "'C': 'G', 'G': 'C', 'T': 'A'} return \"\".join([rev[base] for base in reversed(seq)]) def", "is position from the right return ((seqint << 2) & ~(2**(2*pos+2)-1)) | ((seqint", "cur & ((4**rightseparator)-1) left = (cur >> 2*leftseparator) << 2*rightseparator gappedseqint = left", "avoid slow indexing from panda data frame for i in range(0,len(seqtbl)): #22s for", "& mask right = cur & ((4**rightseparator)-1) left = (cur >> 2*leftseparator) <<", "# with 'at', only 23secs! -- 254secs total for 6mer olig_df[r][i] += 1", "(cur >> 2*leftseparator) << 2*rightseparator gappedseqint = left | right r = (1<<(2*kmer))|gappedseqint", "1: seq = numtonuc[copy&mask] + seq copy >>= 2 if copy == 0:", "{'A':0,'C':1,'G':2,'T':3} numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1} def window(fseq, window_size): for i in", "reverse merge in the original R code # Input: panda list and kmer", "for linreg # with 'at', only 23secs! 
-- 254secs total for 6mer olig_df[r][i]", "last number representation def seqpos(kmer,last): return 1 << (1 + 2 * kmer)", "append-left on the input sequence\") return 0 return seq def seqtoi(seq,gappos=0,gapsize=0): # due", "(1<<(2*kmer))|gappedseqint # append 1 rc = revcomp(r) if r > rc: r =", "# append 1 rc = revcomp(r) if r > rc: r = rc", "if r > rc: r = rc # 392secs with loc,434 secs with", ">>= 2 if copy == 0: print(\"Could not find the append-left on the", "list() for i in range(seqpos(k,False),seqpos(k,True)): if i <= revcomp(i): nonrevk.append(i) return nonrevk def", "<< (pos*2)) | (nucleotides[base] << pos*2) # this function already counts without its", "= 1 mask = 3 copy = int(seqbin) while copy != 1: rev", "range(0,len(seq)): if i in gaps: continue binrep <<= 2 binrep |= nucleotides[seq[i]] return", "2 rev |= complement[copy&mask] copy >>= 2 if copy == 0: print(\"Could not", "+ kmer gapmer = kmer+gapsize # separator, since this is binary, the number", "def itoseq(seqint): if type(seqint) is not int: return seqint seq = \"\" mask", "[0] * len(seqtbl) for k in nonrev_list} # use dictionary first to avoid", "392secs with loc,434 secs with the regression. R time, 10secs for allocation, 3.97mins", "int(seqtbl[i]) while cpy > (4**gapmer)-1: # gap calculation here cur = cpy &", "i.e. oligfreq + reverse merge in the original R code # Input: panda", "+ 1): yield fseq[i:i+window_size] # return the first or the last number representation", "(4**gapmer)-1 cpy = int(seqtbl[i]) while cpy > (4**gapmer)-1: # gap calculation here cur", "0: print(\"Could not find the append-left on the input sequence\") return 0 return", "panda data frame for i in range(0,len(seqtbl)): #22s for 3000 mask = (4**gapmer)-1", "is not int: return seqint seq = \"\" mask = 3 copy =", "pos*2) # this function already counts without its reverse complement, # i.e. 
oligfreq", "& 2**pos-1) & ~(3 << (pos*2)) | (nucleotides[base] << pos*2) # this function", "gaps = range(gappos,gappos+gapsize) for i in range(0,len(seq)): if i in gaps: continue binrep", "= {0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1} def window(fseq, window_size): for i in range(len(fseq) -", "gaps: continue binrep <<= 2 binrep |= nucleotides[seq[i]] return binrep def revcomp(seqbin): rev", "represented in binary nucleotides = {'A':0,'C':1,'G':2,'T':3} numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1} def", "3000 mask = (4**gapmer)-1 cpy = int(seqtbl[i]) while cpy > (4**gapmer)-1: # gap", "are represented in binary nucleotides = {'A':0,'C':1,'G':2,'T':3} numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1}", "oligonucleotide count with reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the gapmodel, our model", "& ~(3 << (pos*2)) | (nucleotides[base] << pos*2) # this function already counts", "3.97mins for linreg # with 'at', only 23secs! -- 254secs total for 6mer", "in the original R code # Input: panda list and kmer length #", "for allocation, 3.97mins for linreg # with 'at', only 23secs! -- 254secs total", "all sequences are represented in binary nucleotides = {'A':0,'C':1,'G':2,'T':3} numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement", "r = rc # 392secs with loc,434 secs with the regression. R time,", "with loc,434 secs with the regression. 
R time, 10secs for allocation, 3.97mins for", "def seqpos(kmer,last): return 1 << (1 + 2 * kmer) if last else", "1: rev <<= 2 rev |= complement[copy&mask] copy >>= 2 if copy ==", "return 0 return seq def seqtoi(seq,gappos=0,gapsize=0): # due to various seqlengths, this project", "all permutations are already reverse-deleted # all sequences are represented in binary nucleotides", "in binary nucleotides = {'A':0,'C':1,'G':2,'T':3} numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1} def window(fseq,", "to various seqlengths, this project always needs append 1 to the left binrep", "this is binary, the number is counted from the right rightseparator = kmer-gappos", "return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k): nonrevk = list() for i in range(seqpos(k,False),seqpos(k,True)): if i", "2**(2*pos)-1) | (nucleotides[base] << pos*2)) #return (seqint << 2) | (seqint & 2**pos-1)", "((4**rightseparator)-1) left = (cur >> 2*leftseparator) << 2*rightseparator gappedseqint = left | right", "<< 2*rightseparator gappedseqint = left | right r = (1<<(2*kmer))|gappedseqint # append 1", "<< 2 * kmer; def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k): nonrevk = list()", "reverse-deleted # all sequences are represented in binary nucleotides = {'A':0,'C':1,'G':2,'T':3} numtonuc =", "last else 1 << 2 * kmer; def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k):", "value while(copy) != 1: seq = numtonuc[copy&mask] + seq copy >>= 2 if", "1 to the left binrep = 1 gaps = range(gappos,gappos+gapsize) for i in", "range(gappos,gappos+gapsize) for i in range(0,len(seq)): if i in gaps: continue binrep <<= 2", "import numpy as np # all permutations are already reverse-deleted # all sequences", "position from the right return ((seqint << 2) & ~(2**(2*pos+2)-1)) | ((seqint &", "as pd import numpy as np # all permutations are 
already reverse-deleted #", "import pandas as pd import numpy as np # all permutations are already", "#return (seqint << 2) | (seqint & 2**pos-1) & ~(3 << (pos*2)) |", "0 return rev def revcompstr(seq): rev = {'A': 'T', 'C': 'G', 'G': 'C',", "permutations are already reverse-deleted # all sequences are represented in binary nucleotides =", "type(seqint) is not int: return seqint seq = \"\" mask = 3 copy", "if last else 1 << 2 * kmer; def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def", "list and kmer length # Output: oligonucleotide count with reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0):", "> rc: r = rc # 392secs with loc,434 secs with the regression.", "# all permutations are already reverse-deleted # all sequences are represented in binary", "= {k: [0] * len(seqtbl) for k in nonrev_list} # use dictionary first", "= list() for i in range(seqpos(k,False),seqpos(k,True)): if i <= revcomp(i): nonrevk.append(i) return nonrevk", "{0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1} def window(fseq, window_size): for i in range(len(fseq) - window_size", "mask = 3 copy = int(seqbin) while copy != 1: rev <<= 2", "nonrevk def itoseq(seqint): if type(seqint) is not int: return seqint seq = \"\"", "find the append-left on the input sequence\") return 0 return seq def seqtoi(seq,gappos=0,gapsize=0):", "(pos*2)) | (nucleotides[base] << pos*2) # this function already counts without its reverse", "reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the gapmodel, our model become gapsize +", "panda list and kmer length # Output: oligonucleotide count with reverse removed def", "& ((4**rightseparator)-1) left = (cur >> 2*leftseparator) << 2*rightseparator gappedseqint = left |", "|= nucleotides[seq[i]] return binrep def revcomp(seqbin): rev = 1 mask = 3 copy", "= rightseparator+gapsize olig_df = {k: [0] * len(seqtbl) for k in nonrev_list} #", "+ seq copy >>= 2 if copy 
== 0: print(\"Could not find the", "pos*2)) #return (seqint << 2) | (seqint & 2**pos-1) & ~(3 << (pos*2))", "= int(seqint) # prevent changing the original value while(copy) != 1: seq =", "numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1} def window(fseq, window_size): for i in range(len(fseq)", "with reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the gapmodel, our model become gapsize", "window_size): for i in range(len(fseq) - window_size + 1): yield fseq[i:i+window_size] # return", "= revcomp(r) if r > rc: r = rc # 392secs with loc,434", "|= complement[copy&mask] copy >>= 2 if copy == 0: print(\"Could not find the", "number representation def seqpos(kmer,last): return 1 << (1 + 2 * kmer) if", "representation def seqpos(kmer,last): return 1 << (1 + 2 * kmer) if last", "def gen_nonreversed_kmer(k): nonrevk = list() for i in range(seqpos(k,False),seqpos(k,True)): if i <= revcomp(i):", "original value while(copy) != 1: seq = numtonuc[copy&mask] + seq copy >>= 2", "i in range(seqpos(k,False),seqpos(k,True)): if i <= revcomp(i): nonrevk.append(i) return nonrevk def itoseq(seqint): if", "1): yield fseq[i:i+window_size] # return the first or the last number representation def", "+ reverse merge in the original R code # Input: panda list and", "data frame for i in range(0,len(seqtbl)): #22s for 3000 mask = (4**gapmer)-1 cpy", "'G', 'G': 'C', 'T': 'A'} return \"\".join([rev[base] for base in reversed(seq)]) def insert_pos(seqint,base,pos):", "<< pos*2)) #return (seqint << 2) | (seqint & 2**pos-1) & ~(3 <<", "cpy = int(seqtbl[i]) while cpy > (4**gapmer)-1: # gap calculation here cur =", "right r = (1<<(2*kmer))|gappedseqint # append 1 rc = revcomp(r) if r >", "the append-left on the input sequence\") return 0 return rev def revcompstr(seq): rev", "left | right r = (1<<(2*kmer))|gappedseqint # append 1 rc = revcomp(r) if", "cpy > (4**gapmer)-1: # gap calculation here cur = cpy & mask right", "== 0: print(\"Could not 
find the append-left on the input sequence\") return 0", "due to various seqlengths, this project always needs append 1 to the left", "<< pos*2) # this function already counts without its reverse complement, # i.e.", "only 23secs! -- 254secs total for 6mer olig_df[r][i] += 1 cpy >>= 2", "def insert_pos(seqint,base,pos): # pos is position from the right return ((seqint << 2)", "kmer length # Output: oligonucleotide count with reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with", "length # Output: oligonucleotide count with reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the", "mask = 3 copy = int(seqint) # prevent changing the original value while(copy)", "return \"\".join([rev[base] for base in reversed(seq)]) def insert_pos(seqint,base,pos): # pos is position from", "= {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} return \"\".join([rev[base] for base", "number is counted from the right rightseparator = kmer-gappos leftseparator = rightseparator+gapsize olig_df", "binrep def revcomp(seqbin): rev = 1 mask = 3 copy = int(seqbin) while", "use dictionary first to avoid slow indexing from panda data frame for i", "# due to various seqlengths, this project always needs append 1 to the", "this function already counts without its reverse complement, # i.e. 
oligfreq + reverse", "k in nonrev_list} # use dictionary first to avoid slow indexing from panda", "<<= 2 binrep |= nucleotides[seq[i]] return binrep def revcomp(seqbin): rev = 1 mask", "return nonrevk def itoseq(seqint): if type(seqint) is not int: return seqint seq =", "= cur & ((4**rightseparator)-1) left = (cur >> 2*leftseparator) << 2*rightseparator gappedseqint =", "while(copy) != 1: seq = numtonuc[copy&mask] + seq copy >>= 2 if copy", "while cpy > (4**gapmer)-1: # gap calculation here cur = cpy & mask", "def revcomp(seqbin): rev = 1 mask = 3 copy = int(seqbin) while copy", "the input sequence\") return 0 return seq def seqtoi(seq,gappos=0,gapsize=0): # due to various", "revcompstr(seq): rev = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} return \"\".join([rev[base]", "copy == 0: print(\"Could not find the append-left on the input sequence\") return", "2 if copy == 0: print(\"Could not find the append-left on the input", "# this function already counts without its reverse complement, # i.e. oligfreq +", "'T', 'C': 'G', 'G': 'C', 'T': 'A'} return \"\".join([rev[base] for base in reversed(seq)])", "= left | right r = (1<<(2*kmer))|gappedseqint # append 1 rc = revcomp(r)", "| ((seqint & 2**(2*pos)-1) | (nucleotides[base] << pos*2)) #return (seqint << 2) |", "# Input: panda list and kmer length # Output: oligonucleotide count with reverse", "#22s for 3000 mask = (4**gapmer)-1 cpy = int(seqtbl[i]) while cpy > (4**gapmer)-1:", "if i in gaps: continue binrep <<= 2 binrep |= nucleotides[seq[i]] return binrep", "<< 2) & ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) | (nucleotides[base] << pos*2)) #return", "the regression. 
R time, 10secs for allocation, 3.97mins for linreg # with 'at',", "r = (1<<(2*kmer))|gappedseqint # append 1 rc = revcomp(r) if r > rc:", "first or the last number representation def seqpos(kmer,last): return 1 << (1 +", "# use dictionary first to avoid slow indexing from panda data frame for", "= 1 gaps = range(gappos,gappos+gapsize) for i in range(0,len(seq)): if i in gaps:", "or the last number representation def seqpos(kmer,last): return 1 << (1 + 2", "separator, since this is binary, the number is counted from the right rightseparator", "window(fseq, window_size): for i in range(len(fseq) - window_size + 1): yield fseq[i:i+window_size] #", "= kmer+gapsize # separator, since this is binary, the number is counted from", "with the gapmodel, our model become gapsize + kmer gapmer = kmer+gapsize #", "(seqint & 2**pos-1) & ~(3 << (pos*2)) | (nucleotides[base] << pos*2) # this", "first to avoid slow indexing from panda data frame for i in range(0,len(seqtbl)):", "return ((seqint << 2) & ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) | (nucleotides[base] <<", "1 rc = revcomp(r) if r > rc: r = rc # 392secs", "= \"\" mask = 3 copy = int(seqint) # prevent changing the original", "i in range(0,len(seq)): if i in gaps: continue binrep <<= 2 binrep |=", "right rightseparator = kmer-gappos leftseparator = rightseparator+gapsize olig_df = {k: [0] * len(seqtbl)", "count with reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the gapmodel, our model become", "= numtonuc[copy&mask] + seq copy >>= 2 if copy == 0: print(\"Could not", "# Output: oligonucleotide count with reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the gapmodel,", "(nucleotides[base] << pos*2)) #return (seqint << 2) | (seqint & 2**pos-1) & ~(3", "the right rightseparator = kmer-gappos leftseparator = rightseparator+gapsize olig_df = {k: [0] *", "changing the original value while(copy) != 1: seq = numtonuc[copy&mask] + seq copy", "for 3000 
mask = (4**gapmer)-1 cpy = int(seqtbl[i]) while cpy > (4**gapmer)-1: #", "and kmer length # Output: oligonucleotide count with reverse removed def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): #", "pandas as pd import numpy as np # all permutations are already reverse-deleted", "np # all permutations are already reverse-deleted # all sequences are represented in", "in range(len(fseq) - window_size + 1): yield fseq[i:i+window_size] # return the first or", "counts without its reverse complement, # i.e. oligfreq + reverse merge in the", "nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0): # with the gapmodel, our model become gapsize + kmer gapmer =", "already counts without its reverse complement, # i.e. oligfreq + reverse merge in", "find the append-left on the input sequence\") return 0 return rev def revcompstr(seq):", "2 * kmer; def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k): nonrevk = list() for", "is counted from the right rightseparator = kmer-gappos leftseparator = rightseparator+gapsize olig_df =", "numpy as np # all permutations are already reverse-deleted # all sequences are", "for base in reversed(seq)]) def insert_pos(seqint,base,pos): # pos is position from the right", "= {'A':0,'C':1,'G':2,'T':3} numtonuc = {0:'A',1:'C',2:'G',3:'T'} complement = {0:3,3:0,1:2,2:1} def window(fseq, window_size): for i", "sequence\") return 0 return seq def seqtoi(seq,gappos=0,gapsize=0): # due to various seqlengths, this", "allocation, 3.97mins for linreg # with 'at', only 23secs! 
-- 254secs total for", "seq = numtonuc[copy&mask] + seq copy >>= 2 if copy == 0: print(\"Could", "revcomp(seqbin): rev = 1 mask = 3 copy = int(seqbin) while copy !=", "copy = int(seqbin) while copy != 1: rev <<= 2 rev |= complement[copy&mask]", "mask = (4**gapmer)-1 cpy = int(seqtbl[i]) while cpy > (4**gapmer)-1: # gap calculation", "3 copy = int(seqbin) while copy != 1: rev <<= 2 rev |=", "slow indexing from panda data frame for i in range(0,len(seqtbl)): #22s for 3000", "gapmer = kmer+gapsize # separator, since this is binary, the number is counted", "return binrep def revcomp(seqbin): rev = 1 mask = 3 copy = int(seqbin)", "kmer; def seq_permutation(seqlen): return (range(seqpos(seqlen,False),seqpos(seqlen,True))) def gen_nonreversed_kmer(k): nonrevk = list() for i in", "= (1<<(2*kmer))|gappedseqint # append 1 rc = revcomp(r) if r > rc: r", "right = cur & ((4**rightseparator)-1) left = (cur >> 2*leftseparator) << 2*rightseparator gappedseqint", "for i in range(len(fseq) - window_size + 1): yield fseq[i:i+window_size] # return the", "not int: return seqint seq = \"\" mask = 3 copy = int(seqint)", "| (seqint & 2**pos-1) & ~(3 << (pos*2)) | (nucleotides[base] << pos*2) #", "i in range(len(fseq) - window_size + 1): yield fseq[i:i+window_size] # return the first", "return seq def seqtoi(seq,gappos=0,gapsize=0): # due to various seqlengths, this project always needs", "# pos is position from the right return ((seqint << 2) & ~(2**(2*pos+2)-1))", "i in range(0,len(seqtbl)): #22s for 3000 mask = (4**gapmer)-1 cpy = int(seqtbl[i]) while", "= rc # 392secs with loc,434 secs with the regression. 
R time, 10secs", "not find the append-left on the input sequence\") return 0 return seq def", "\"\" mask = 3 copy = int(seqint) # prevent changing the original value", "print(\"Could not find the append-left on the input sequence\") return 0 return rev", "gappedseqint = left | right r = (1<<(2*kmer))|gappedseqint # append 1 rc =", "= int(seqtbl[i]) while cpy > (4**gapmer)-1: # gap calculation here cur = cpy", "& ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) | (nucleotides[base] << pos*2)) #return (seqint <<", "i in gaps: continue binrep <<= 2 binrep |= nucleotides[seq[i]] return binrep def", "= cpy & mask right = cur & ((4**rightseparator)-1) left = (cur >>", "various seqlengths, this project always needs append 1 to the left binrep =", "prevent changing the original value while(copy) != 1: seq = numtonuc[copy&mask] + seq", "{'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} return \"\".join([rev[base] for base in", "= 3 copy = int(seqbin) while copy != 1: rev <<= 2 rev", "mask right = cur & ((4**rightseparator)-1) left = (cur >> 2*leftseparator) << 2*rightseparator", "binrep = 1 gaps = range(gappos,gappos+gapsize) for i in range(0,len(seq)): if i in", "nucleotides[seq[i]] return binrep def revcomp(seqbin): rev = 1 mask = 3 copy =", "rc: r = rc # 392secs with loc,434 secs with the regression. R", "nonrevk = list() for i in range(seqpos(k,False),seqpos(k,True)): if i <= revcomp(i): nonrevk.append(i) return", "'C', 'T': 'A'} return \"\".join([rev[base] for base in reversed(seq)]) def insert_pos(seqint,base,pos): # pos", "= (4**gapmer)-1 cpy = int(seqtbl[i]) while cpy > (4**gapmer)-1: # gap calculation here", "with the regression. 
R time, 10secs for allocation, 3.97mins for linreg # with", "kmer+gapsize # separator, since this is binary, the number is counted from the", "the number is counted from the right rightseparator = kmer-gappos leftseparator = rightseparator+gapsize", "kmer gapmer = kmer+gapsize # separator, since this is binary, the number is", "the input sequence\") return 0 return rev def revcompstr(seq): rev = {'A': 'T',", "seqpos(kmer,last): return 1 << (1 + 2 * kmer) if last else 1", "in range(0,len(seqtbl)): #22s for 3000 mask = (4**gapmer)-1 cpy = int(seqtbl[i]) while cpy", "code # Input: panda list and kmer length # Output: oligonucleotide count with", "{k: [0] * len(seqtbl) for k in nonrev_list} # use dictionary first to", "def window(fseq, window_size): for i in range(len(fseq) - window_size + 1): yield fseq[i:i+window_size]", "yield fseq[i:i+window_size] # return the first or the last number representation def seqpos(kmer,last):", "frame for i in range(0,len(seqtbl)): #22s for 3000 mask = (4**gapmer)-1 cpy =", "nonrev_list} # use dictionary first to avoid slow indexing from panda data frame", "seq = \"\" mask = 3 copy = int(seqint) # prevent changing the", "the left binrep = 1 gaps = range(gappos,gappos+gapsize) for i in range(0,len(seq)): if", "# i.e. oligfreq + reverse merge in the original R code # Input:", "cpy & mask right = cur & ((4**rightseparator)-1) left = (cur >> 2*leftseparator)", "rev = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} return \"\".join([rev[base] for", "# prevent changing the original value while(copy) != 1: seq = numtonuc[copy&mask] +", "linreg # with 'at', only 23secs! 
-- 254secs total for 6mer olig_df[r][i] +=", "numtonuc[copy&mask] + seq copy >>= 2 if copy == 0: print(\"Could not find", "2**pos-1) & ~(3 << (pos*2)) | (nucleotides[base] << pos*2) # this function already", "dictionary first to avoid slow indexing from panda data frame for i in", "the original value while(copy) != 1: seq = numtonuc[copy&mask] + seq copy >>=", "import itertools import pandas as pd import numpy as np # all permutations", "continue binrep <<= 2 binrep |= nucleotides[seq[i]] return binrep def revcomp(seqbin): rev =", "| right r = (1<<(2*kmer))|gappedseqint # append 1 rc = revcomp(r) if r", "int(seqint) # prevent changing the original value while(copy) != 1: seq = numtonuc[copy&mask]", "+ 2 * kmer) if last else 1 << 2 * kmer; def", "2*rightseparator gappedseqint = left | right r = (1<<(2*kmer))|gappedseqint # append 1 rc", "'at', only 23secs! -- 254secs total for 6mer olig_df[r][i] += 1 cpy >>=", "= 3 copy = int(seqint) # prevent changing the original value while(copy) !=", "if copy == 0: print(\"Could not find the append-left on the input sequence\")", "2*leftseparator) << 2*rightseparator gappedseqint = left | right r = (1<<(2*kmer))|gappedseqint # append", "to avoid slow indexing from panda data frame for i in range(0,len(seqtbl)): #22s", "on the input sequence\") return 0 return seq def seqtoi(seq,gappos=0,gapsize=0): # due to", "seq def seqtoi(seq,gappos=0,gapsize=0): # due to various seqlengths, this project always needs append", "len(seqtbl) for k in nonrev_list} # use dictionary first to avoid slow indexing", "merge in the original R code # Input: panda list and kmer length", "copy != 1: rev <<= 2 rev |= complement[copy&mask] copy >>= 2 if", "time, 10secs for allocation, 3.97mins for linreg # with 'at', only 23secs! 
--", "i <= revcomp(i): nonrevk.append(i) return nonrevk def itoseq(seqint): if type(seqint) is not int:", "# return the first or the last number representation def seqpos(kmer,last): return 1", "in nonrev_list} # use dictionary first to avoid slow indexing from panda data", "the last number representation def seqpos(kmer,last): return 1 << (1 + 2 *", "binary, the number is counted from the right rightseparator = kmer-gappos leftseparator =", "2 * kmer) if last else 1 << 2 * kmer; def seq_permutation(seqlen):", "23secs! -- 254secs total for 6mer olig_df[r][i] += 1 cpy >>= 2 return", ">> 2*leftseparator) << 2*rightseparator gappedseqint = left | right r = (1<<(2*kmer))|gappedseqint #", "with 'at', only 23secs! -- 254secs total for 6mer olig_df[r][i] += 1 cpy", "itertools import pandas as pd import numpy as np # all permutations are", "always needs append 1 to the left binrep = 1 gaps = range(gappos,gappos+gapsize)", "while copy != 1: rev <<= 2 rev |= complement[copy&mask] copy >>= 2", "in reversed(seq)]) def insert_pos(seqint,base,pos): # pos is position from the right return ((seqint", "input sequence\") return 0 return rev def revcompstr(seq): rev = {'A': 'T', 'C':", "copy >>= 2 if copy == 0: print(\"Could not find the append-left on", "binrep |= nucleotides[seq[i]] return binrep def revcomp(seqbin): rev = 1 mask = 3" ]
[]
[ "= config['private_number'] def decToHexAddress(arg): arr = arg.split(\".\") output = '' for i in", "s['switch_address'] for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout = 2,", "VLAN number & Switch address switches = config['switches'] vlan = config['private_number'] def decToHexAddress(arg):", "+ ',' + mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data) text = \"\" for", "element = str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \", \";\") splitArr = element.split(\";\")", "= element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host + ',' + mac_address) print(\"['SWITCH ADDRESS,MAC", "output.extend(data) text = \"\" for j in output: text += j + '\\n'", "= [] for varBind in varBinds: element = str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\"", "j in output: text += j + '\\n' with open('mac_mapper.txt', \"w\") as f:", "for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout = 2, retries", "1][0] or '?'), file=sys.stderr) break else: data = [] for varBind in varBinds:", "'\\n' with open('mac_mapper.txt', \"w\") as f: f.write(text) if file != None: with open(file,", "config['private_number'] def decToHexAddress(arg): arr = arg.split(\".\") output = '' for i in range(len(arr)):", "print('%s at %s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1][0] or '?'), file=sys.stderr)", "print(data) output.extend(data) text = \"\" for j in output: text += j +", "for varBind in varBinds: element = str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \",", "#.env file need to have VLAN number & 
Switch address switches = config['switches']", "i in range(len(arr)): if i == len(arr) - 1: output = output +", "output + hex(int(arr[i])).replace('0x', '').upper() else: output = output + hex(int(arr[i])).replace('0x', '').upper() + \":\"", "if errorIndication: print(errorIndication, file=sys.stderr) break elif errorStatus: print('%s at %s' % (errorStatus.prettyPrint(), errorIndex", "[] mac_addresses = [] for s in switches: host = s['switch_address'] for (errorIndication,", "import * from parse_config import config import sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file", "element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \", \";\") splitArr = element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host", "element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host + ',' + mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data) text", "need to have VLAN number & Switch address switches = config['switches'] vlan =", "switches: host = s['switch_address'] for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), UdpTransportTarget((host,", "with open(file, \"w\") as f: for address in mac_addresses: f.write(address+\"\\n\") if __name__ ==", "f: f.write(text) if file != None: with open(file, \"w\") as f: for address", "\"w\") as f: for address in mac_addresses: f.write(address+\"\\n\") if __name__ == \"__main__\": mac_mapper()", "SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need to have VLAN number & Switch address switches", "j + '\\n' with open('mac_mapper.txt', \"w\") as f: f.write(text) if file != 
None:", "CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout = 2, retries = 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication:", "file=sys.stderr) break else: data = [] for varBind in varBinds: element = str(varBind)", "for i in range(len(arr)): if i == len(arr) - 1: output = output", "+ '\\n' with open('mac_mapper.txt', \"w\") as f: f.write(text) if file != None: with", "mac_addresses = [] for s in switches: host = s['switch_address'] for (errorIndication, errorStatus,", "\"\").replace(\" = \", \";\") splitArr = element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host +", "(errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1][0] or '?'), file=sys.stderr) break else: data =", "text = \"\" for j in output: text += j + '\\n' with", "#Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need to have VLAN number & Switch address", "as f: f.write(text) if file != None: with open(file, \"w\") as f: for", "mac_addresses.append(mac_address) data.append(host + ',' + mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data) text =", "output = [] mac_addresses = [] for s in switches: host = s['switch_address']", "= [] mac_addresses = [] for s in switches: host = s['switch_address'] for", "host = s['switch_address'] for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout", "output = '' for i in range(len(arr)): if i == len(arr) - 1:", "[] for varBind in varBinds: element = str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" =", "break elif errorStatus: print('%s at %s' % (errorStatus.prettyPrint(), 
errorIndex and varBinds[int(errorIndex) - 1][0]", "% (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1][0] or '?'), file=sys.stderr) break else: data", "* from parse_config import config import sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need", "\":\" return output def mac_mapper(file): output = [] mac_addresses = [] for s", "ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data) text = \"\" for j in output: text +=", "else: output = output + hex(int(arr[i])).replace('0x', '').upper() + \":\" return output def mac_mapper(file):", "element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host + ',' + mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\")", "Switch address switches = config['switches'] vlan = config['private_number'] def decToHexAddress(arg): arr = arg.split(\".\")", "print(errorIndication, file=sys.stderr) break elif errorStatus: print('%s at %s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex)", "switches = config['switches'] vlan = config['private_number'] def decToHexAddress(arg): arr = arg.split(\".\") output =", "hex(int(arr[i])).replace('0x', '').upper() + \":\" return output def mac_mapper(file): output = [] mac_addresses =", "mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host + ',' + mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data)", "+= j + '\\n' with open('mac_mapper.txt', \"w\") as f: f.write(text) if file !=", "f.write(text) if file != None: with open(file, \"w\") as f: for address in", "161),timeout = 2, retries = 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication: print(errorIndication, file=sys.stderr)", "errorStatus: 
print('%s at %s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1][0] or '?'),", "varBinds) in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout = 2, retries = 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')),", "open(file, \"w\") as f: for address in mac_addresses: f.write(address+\"\\n\") if __name__ == \"__main__\":", "= 2, retries = 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication: print(errorIndication, file=sys.stderr) break", "+ hex(int(arr[i])).replace('0x', '').upper() else: output = output + hex(int(arr[i])).replace('0x', '').upper() + \":\" return", "'?'), file=sys.stderr) break else: data = [] for varBind in varBinds: element =", "for j in output: text += j + '\\n' with open('mac_mapper.txt', \"w\") as", "= arg.split(\".\") output = '' for i in range(len(arr)): if i == len(arr)", "lexicographicMode=False): if errorIndication: print(errorIndication, file=sys.stderr) break elif errorStatus: print('%s at %s' % (errorStatus.prettyPrint(),", "s in switches: host = s['switch_address'] for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(),", "varBind in varBinds: element = str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \", \";\")", "parse_config import config import sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need to have", "5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication: print(errorIndication, file=sys.stderr) break elif errorStatus: print('%s at", "arg.split(\".\") output = '' for i in range(len(arr)): if i == len(arr) -", "= '' for i in range(len(arr)): if i == len(arr) - 1: output", "and varBinds[int(errorIndex) - 1][0] or '?'), 
file=sys.stderr) break else: data = [] for", "= [] for s in switches: host = s['switch_address'] for (errorIndication, errorStatus, errorIndex,", "errorIndication: print(errorIndication, file=sys.stderr) break elif errorStatus: print('%s at %s' % (errorStatus.prettyPrint(), errorIndex and", "varBinds: element = str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \", \";\") splitArr =", "varBinds[int(errorIndex) - 1][0] or '?'), file=sys.stderr) break else: data = [] for varBind", "arr = arg.split(\".\") output = '' for i in range(len(arr)): if i ==", "file need to have VLAN number & Switch address switches = config['switches'] vlan", "+ mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data) text = \"\" for j in", "'').upper() else: output = output + hex(int(arr[i])).replace('0x', '').upper() + \":\" return output def", "output def mac_mapper(file): output = [] mac_addresses = [] for s in switches:", "= element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \", \";\") splitArr = element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address)", "def decToHexAddress(arg): arr = arg.split(\".\") output = '' for i in range(len(arr)): if", "- 1][0] or '?'), file=sys.stderr) break else: data = [] for varBind in", "data = [] for varBind in varBinds: element = str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\",", "if file != None: with open(file, \"w\") as f: for address in mac_addresses:", "file != None: with open(file, \"w\") as f: for address in mac_addresses: f.write(address+\"\\n\")", "= config['switches'] vlan = config['private_number'] def decToHexAddress(arg): arr = arg.split(\".\") output = ''", "in range(len(arr)): if i == len(arr) - 1: output = output + hex(int(arr[i])).replace('0x',", "errorIndex, varBinds) in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), 
UdpTransportTarget((host, 161),timeout = 2, retries = 5), ContextData(),", "output = output + hex(int(arr[i])).replace('0x', '').upper() else: output = output + hex(int(arr[i])).replace('0x', '').upper()", "\", \";\") splitArr = element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host + ',' +", "splitArr = element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host + ',' + mac_address) print(\"['SWITCH", "== len(arr) - 1: output = output + hex(int(arr[i])).replace('0x', '').upper() else: output =", "in switches: host = s['switch_address'] for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)),", "& Switch address switches = config['switches'] vlan = config['private_number'] def decToHexAddress(arg): arr =", "mac_mapper(file): output = [] mac_addresses = [] for s in switches: host =", "else: data = [] for varBind in varBinds: element = str(varBind) element =", "with open('mac_mapper.txt', \"w\") as f: f.write(text) if file != None: with open(file, \"w\")", "address switches = config['switches'] vlan = config['private_number'] def decToHexAddress(arg): arr = arg.split(\".\") output", "[] for s in switches: host = s['switch_address'] for (errorIndication, errorStatus, errorIndex, varBinds)", "output + hex(int(arr[i])).replace('0x', '').upper() + \":\" return output def mac_mapper(file): output = []", "= element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host + ',' + mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data)", "1: output = output + hex(int(arr[i])).replace('0x', '').upper() else: output = output + hex(int(arr[i])).replace('0x',", "open('mac_mapper.txt', \"w\") as f: f.write(text) if file != None: with open(file, \"w\") as", "+ \":\" 
return output def mac_mapper(file): output = [] mac_addresses = [] for", "<gh_stars>1-10 from pysnmp.hlapi import * from parse_config import config import sys #Reference SNMAP-WALK", "'').upper() + \":\" return output def mac_mapper(file): output = [] mac_addresses = []", "data.append(host + ',' + mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data) text = \"\"", "or '?'), file=sys.stderr) break else: data = [] for varBind in varBinds: element", "',' + mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data) text = \"\" for j", "= 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication: print(errorIndication, file=sys.stderr) break elif errorStatus: print('%s", "at %s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1][0] or '?'), file=sys.stderr) break", "!= None: with open(file, \"w\") as f: for address in mac_addresses: f.write(address+\"\\n\") if", "= str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \", \";\") splitArr = element.split(\";\") mac_address", "range(len(arr)): if i == len(arr) - 1: output = output + hex(int(arr[i])).replace('0x', '').upper()", "%s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1][0] or '?'), file=sys.stderr) break else:", "errorIndex and varBinds[int(errorIndex) - 1][0] or '?'), file=sys.stderr) break else: data = []", "= \", \";\") splitArr = element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host + ','", "to have VLAN number & Switch address switches = config['switches'] vlan = config['private_number']", "nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout = 2, retries = 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if", 
"i == len(arr) - 1: output = output + hex(int(arr[i])).replace('0x', '').upper() else: output", "sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need to have VLAN number & Switch", "errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout = 2, retries = 5),", "= output + hex(int(arr[i])).replace('0x', '').upper() else: output = output + hex(int(arr[i])).replace('0x', '').upper() +", "\";\") splitArr = element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0])) mac_addresses.append(mac_address) data.append(host + ',' + mac_address)", "str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \", \";\") splitArr = element.split(\";\") mac_address =", "decToHexAddress(arg): arr = arg.split(\".\") output = '' for i in range(len(arr)): if i", "output = output + hex(int(arr[i])).replace('0x', '').upper() + \":\" return output def mac_mapper(file): output", "'' for i in range(len(arr)): if i == len(arr) - 1: output =", "+ hex(int(arr[i])).replace('0x', '').upper() + \":\" return output def mac_mapper(file): output = [] mac_addresses", "in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout = 2, retries = 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False):", "hex(int(arr[i])).replace('0x', '').upper() else: output = output + hex(int(arr[i])).replace('0x', '').upper() + \":\" return output", "from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need to have VLAN number & Switch address switches =", "have VLAN number & Switch address switches = config['switches'] vlan = config['private_number'] def", "= s['switch_address'] for 
(errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(), CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout =", "number & Switch address switches = config['switches'] vlan = config['private_number'] def decToHexAddress(arg): arr", "= output + hex(int(arr[i])).replace('0x', '').upper() + \":\" return output def mac_mapper(file): output =", "from parse_config import config import sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need to", "for s in switches: host = s['switch_address'] for (errorIndication, errorStatus, errorIndex, varBinds) in", "break else: data = [] for varBind in varBinds: element = str(varBind) element", "in varBinds: element = str(varBind) element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \", \";\") splitArr", "retries = 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication: print(errorIndication, file=sys.stderr) break elif errorStatus:", "import config import sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need to have VLAN", "elif errorStatus: print('%s at %s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1][0] or", "= \"\" for j in output: text += j + '\\n' with open('mac_mapper.txt',", "def mac_mapper(file): output = [] mac_addresses = [] for s in switches: host", "UdpTransportTarget((host, 161),timeout = 2, retries = 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication: print(errorIndication,", "ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication: print(errorIndication, file=sys.stderr) break elif errorStatus: print('%s at 
%s'", "mac_address) print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data) text = \"\" for j in output:", "print(\"['SWITCH ADDRESS,MAC ADDRESS;PORT']\") print(data) output.extend(data) text = \"\" for j in output: text", "pysnmp.hlapi import * from parse_config import config import sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env", "\"\" for j in output: text += j + '\\n' with open('mac_mapper.txt', \"w\")", "file=sys.stderr) break elif errorStatus: print('%s at %s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) -", "config import sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need to have VLAN number", "- 1: output = output + hex(int(arr[i])).replace('0x', '').upper() else: output = output +", "in output: text += j + '\\n' with open('mac_mapper.txt', \"w\") as f: f.write(text)", "element = element.replace(\"SNMPv2-SMI::mib-2.17.4.3.1.2.\", \"\").replace(\" = \", \";\") splitArr = element.split(\";\") mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0]))", "vlan = config['private_number'] def decToHexAddress(arg): arr = arg.split(\".\") output = '' for i", "if i == len(arr) - 1: output = output + hex(int(arr[i])).replace('0x', '').upper() else:", "2, retries = 5), ContextData(), ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication: print(errorIndication, file=sys.stderr) break elif", "ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False): if errorIndication: print(errorIndication, file=sys.stderr) break elif errorStatus: print('%s at %s' %", "return output def mac_mapper(file): output = [] mac_addresses = [] for s in", "(errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(), 
CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout = 2, retries =", "import sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8 #.env file need to have VLAN number &", "from pysnmp.hlapi import * from parse_config import config import sys #Reference SNMAP-WALK from:https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8", "\"w\") as f: f.write(text) if file != None: with open(file, \"w\") as f:", "output: text += j + '\\n' with open('mac_mapper.txt', \"w\") as f: f.write(text) if", "len(arr) - 1: output = output + hex(int(arr[i])).replace('0x', '').upper() else: output = output", "None: with open(file, \"w\") as f: for address in mac_addresses: f.write(address+\"\\n\") if __name__", "config['switches'] vlan = config['private_number'] def decToHexAddress(arg): arr = arg.split(\".\") output = '' for", "text += j + '\\n' with open('mac_mapper.txt', \"w\") as f: f.write(text) if file", "ADDRESS;PORT']\") print(data) output.extend(data) text = \"\" for j in output: text += j" ]
[ "jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return jogadores def valida_escolha(escolha): if escolha != \"O\" and escolha", "valida_escolha(escolha): if escolha != \"O\" and escolha != \"X\": while jogadores[\"jogador_1\"][\"escolha\"] != \"O\"", "} def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e seu nome? ')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome", "def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e seu nome? ')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da", "')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}? [O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] ==", "str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}? [O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"] =", "= str(input('Qual e seu nome? ')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da pessoa que vai", "\"escolha\": \"\" } } def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e seu nome? '))", "escolha != \"O\" and escolha != \"X\": while jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and jogadores[\"jogador_1\"][\"escolha\"]", "\"O\" and jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"] = str(input(\"ERRO: tente novamente, Qual você quer?", "{jogadores[\"jogador_1\"][\"name\"]}? 
[O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else: jogadores[\"jogador_2\"][\"escolha\"]", "da pessoa que vai jogar com vc: ')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você quer", "jogadores = { \"jogador_1\":{ \"name\": \"\", \"escolha\": \"\" }, \"jogador_2\":{ \"name\": \"\", \"escolha\":", "jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da pessoa que vai jogar com vc: ')) jogadores[\"jogador_1\"][\"escolha\"] =", "\"\", \"escolha\": \"\" } } def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e seu nome?", "nome? ')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da pessoa que vai jogar com vc: '))", "seu nome? ')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da pessoa que vai jogar com vc:", "\"\" }, \"jogador_2\":{ \"name\": \"\", \"escolha\": \"\" } } def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] =", "= str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}? [O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"]", "vc: ')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}? 
[O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"]", "')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\"", "\"escolha\": \"\" }, \"jogador_2\":{ \"name\": \"\", \"escolha\": \"\" } } def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"]", "== \"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return jogadores def valida_escolha(escolha):", "inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e seu nome? ')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da pessoa", "jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"] = str(input(\"ERRO: tente novamente, Qual", "\"\", \"escolha\": \"\" }, \"jogador_2\":{ \"name\": \"\", \"escolha\": \"\" } } def inicia_jogadores():", "} } def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e seu nome? ')) jogadores[\"jogador_2\"][\"name\"] =", "and jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"] = str(input(\"ERRO: tente novamente, Qual você quer? [O/X]", "jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e seu nome? ')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da pessoa que", "')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da pessoa que vai jogar com vc: ')) jogadores[\"jogador_1\"][\"escolha\"]", "vai jogar com vc: ')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}? 
[O/X] ')).upper()", "jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return jogadores def valida_escolha(escolha): if escolha", "= \"X\" return jogadores def valida_escolha(escolha): if escolha != \"O\" and escolha !=", "str(input('Qual e seu nome? ')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da pessoa que vai jogar", "valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return", "quer {jogadores[\"jogador_1\"][\"name\"]}? [O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else:", "escolha != \"X\": while jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"] =", "= { \"jogador_1\":{ \"name\": \"\", \"escolha\": \"\" }, \"jogador_2\":{ \"name\": \"\", \"escolha\": \"\"", "\"jogador_1\":{ \"name\": \"\", \"escolha\": \"\" }, \"jogador_2\":{ \"name\": \"\", \"escolha\": \"\" } }", "= str(input('Nome da pessoa que vai jogar com vc: ')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual", "jogar com vc: ')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}? [O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"])", "jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}? 
[O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] == \"X\":", "\"name\": \"\", \"escolha\": \"\" } } def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e seu", "que vai jogar com vc: ')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}? [O/X]", "[O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else: jogadores[\"jogador_2\"][\"escolha\"] =", "\"\" } } def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e seu nome? ')) jogadores[\"jogador_2\"][\"name\"]", "!= \"O\" and jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"] = str(input(\"ERRO: tente novamente, Qual você", "\"name\": \"\", \"escolha\": \"\" }, \"jogador_2\":{ \"name\": \"\", \"escolha\": \"\" } } def", "com vc: ')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}? [O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if", "você quer {jogadores[\"jogador_1\"][\"name\"]}? [O/X] ')).upper() valida_escolha(jogadores[\"jogador_1\"][\"escolha\"]) if jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\"", "return jogadores def valida_escolha(escolha): if escolha != \"O\" and escolha != \"X\": while", "str(input('Nome da pessoa que vai jogar com vc: ')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você", "e seu nome? 
')) jogadores[\"jogador_2\"][\"name\"] = str(input('Nome da pessoa que vai jogar com", "else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return jogadores def valida_escolha(escolha): if escolha != \"O\" and", "if jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return jogadores", "and escolha != \"X\": while jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"]", "jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"] = str(input(\"ERRO: tente novamente, Qual você quer? [O/X] \")).upper()", "def valida_escolha(escolha): if escolha != \"O\" and escolha != \"X\": while jogadores[\"jogador_1\"][\"escolha\"] !=", "= \"O\" else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return jogadores def valida_escolha(escolha): if escolha !=", "while jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"] = str(input(\"ERRO: tente novamente,", "\"X\" return jogadores def valida_escolha(escolha): if escolha != \"O\" and escolha != \"X\":", "pessoa que vai jogar com vc: ')) jogadores[\"jogador_1\"][\"escolha\"] = str(input(f'Qual você quer {jogadores[\"jogador_1\"][\"name\"]}?", "}, \"jogador_2\":{ \"name\": \"\", \"escolha\": \"\" } } def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual", "jogadores def valida_escolha(escolha): if escolha != \"O\" and escolha != \"X\": while jogadores[\"jogador_1\"][\"escolha\"]", "!= \"O\" and escolha != \"X\": while jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and jogadores[\"jogador_1\"][\"escolha\"] !=", "\"O\" and escolha != \"X\": while jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and jogadores[\"jogador_1\"][\"escolha\"] != \"X\":", "\"O\" else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return jogadores def 
valida_escolha(escolha): if escolha != \"O\"", "!= \"X\": while jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"] = str(input(\"ERRO:", "\"X\": while jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and jogadores[\"jogador_1\"][\"escolha\"] != \"X\": jogadores[\"jogador_1\"][\"escolha\"] = str(input(\"ERRO: tente", "\"jogador_2\":{ \"name\": \"\", \"escolha\": \"\" } } def inicia_jogadores(): jogadores[\"jogador_1\"][\"name\"] = str(input('Qual e", "jogadores[\"jogador_1\"][\"escolha\"] == \"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return jogadores def", "if escolha != \"O\" and escolha != \"X\": while jogadores[\"jogador_1\"][\"escolha\"] != \"O\" and", "\"X\": jogadores[\"jogador_2\"][\"escolha\"] = \"O\" else: jogadores[\"jogador_2\"][\"escolha\"] = \"X\" return jogadores def valida_escolha(escolha): if", "{ \"jogador_1\":{ \"name\": \"\", \"escolha\": \"\" }, \"jogador_2\":{ \"name\": \"\", \"escolha\": \"\" }" ]
[ "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0002_userprofile'), ('kubeops_api',", "= [ migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory', name='date_type', field=models.CharField(choices=[('HOUR', 'HOUR'), ('DAY',", "= [ ('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ] operations = [ migrations.AddField( model_name='item', name='users',", "# Generated by Django 2.2.10 on 2020-02-23 05:57 from django.db import migrations, models", "('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ] operations = [ migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ),", "migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory', name='date_type', field=models.CharField(choices=[('HOUR', 'HOUR'), ('DAY', 'DAY')], default='HOUR',", "'0062_auto_20200221_0510'), ] operations = [ migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory', name='date_type',", "models class Migration(migrations.Migration): dependencies = [ ('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ] operations =", "('kubeops_api', '0062_auto_20200221_0510'), ] operations = [ migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory',", "on 2020-02-23 05:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory', 
name='date_type', field=models.CharField(choices=[('HOUR', 'HOUR'), ('DAY', 'DAY')], default='HOUR', max_length=255), ), ]", "operations = [ migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory', name='date_type', field=models.CharField(choices=[('HOUR', 'HOUR'),", "<filename>core/apps/kubeops_api/migrations/0063_auto_20200223_0557.py # Generated by Django 2.2.10 on 2020-02-23 05:57 from django.db import migrations,", "2.2.10 on 2020-02-23 05:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "Generated by Django 2.2.10 on 2020-02-23 05:57 from django.db import migrations, models class", "model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory', name='date_type', field=models.CharField(choices=[('HOUR', 'HOUR'), ('DAY', 'DAY')], default='HOUR', max_length=255),", "migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ] operations", "'0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ] operations = [ migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField(", "dependencies = [ ('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ] operations = [ migrations.AddField( model_name='item',", "] operations = [ migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory', name='date_type', field=models.CharField(choices=[('HOUR',", "name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory', name='date_type', 
field=models.CharField(choices=[('HOUR', 'HOUR'), ('DAY', 'DAY')], default='HOUR', max_length=255), ),", "05:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0002_userprofile'),", "Django 2.2.10 on 2020-02-23 05:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "[ ('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ] operations = [ migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'),", "[ migrations.AddField( model_name='item', name='users', field=models.ManyToManyField(to='users.UserProfile'), ), migrations.AlterField( model_name='clusterhealthhistory', name='date_type', field=models.CharField(choices=[('HOUR', 'HOUR'), ('DAY', 'DAY')],", "class Migration(migrations.Migration): dependencies = [ ('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ] operations = [", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'),", "by Django 2.2.10 on 2020-02-23 05:57 from django.db import migrations, models class Migration(migrations.Migration):", "2020-02-23 05:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users',", "Migration(migrations.Migration): dependencies = [ ('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ] operations = [ migrations.AddField(", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0002_userprofile'), ('kubeops_api', '0062_auto_20200221_0510'), ]" ]
[ "def test_format_deployments_info(): formatted = format_deployments_info(cluster_name='cluster1') assert formatted == ( \"\\nTo access the allocation", "== ( \"\\nTo access the allocation and notebook deployments from cluster,\" \" you", "snippet.\\n\" \"You may need to change the cluster name if it's different in\"", "if it's different in\" \" the target environment.\\n\" \"----------------\\n\" \"from idact import show_cluster\\n\"", "deployments from cluster,\" \" you can use the following snippet.\\n\" \"You may need", "idact.detail.jupyter_app.format_deployments_info import \\ format_deployments_info def test_format_deployments_info(): formatted = format_deployments_info(cluster_name='cluster1') assert formatted == (", "from cluster,\" \" you can use the following snippet.\\n\" \"You may need to", "\"from idact import show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\" \"deployments = cluster.pull_deployments()\\n\" \"nodes = deployments.nodes[-1]\\n\"", "allocation and notebook deployments from cluster,\" \" you can use the following snippet.\\n\"", "idact import show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\" \"deployments = cluster.pull_deployments()\\n\" \"nodes = deployments.nodes[-1]\\n\" \"nb", "\"----------------\\n\" \"from idact import show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\" \"deployments = cluster.pull_deployments()\\n\" \"nodes =", "\"You may need to change the cluster name if it's different in\" \"", "formatted == ( \"\\nTo access the allocation and notebook deployments from cluster,\" \"", "it's different in\" \" the target environment.\\n\" \"----------------\\n\" \"from idact import show_cluster\\n\" \"cluster", "change the cluster name if it's different in\" \" the target environment.\\n\" \"----------------\\n\"", "\\ format_deployments_info def test_format_deployments_info(): formatted = format_deployments_info(cluster_name='cluster1') assert formatted == ( \"\\nTo access", "different in\" \" the 
target environment.\\n\" \"----------------\\n\" \"from idact import show_cluster\\n\" \"cluster =", "you can use the following snippet.\\n\" \"You may need to change the cluster", "notebook deployments from cluster,\" \" you can use the following snippet.\\n\" \"You may", "in\" \" the target environment.\\n\" \"----------------\\n\" \"from idact import show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\"", "can use the following snippet.\\n\" \"You may need to change the cluster name", "format_deployments_info def test_format_deployments_info(): formatted = format_deployments_info(cluster_name='cluster1') assert formatted == ( \"\\nTo access the", "target environment.\\n\" \"----------------\\n\" \"from idact import show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\" \"deployments = cluster.pull_deployments()\\n\"", "and notebook deployments from cluster,\" \" you can use the following snippet.\\n\" \"You", "the target environment.\\n\" \"----------------\\n\" \"from idact import show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\" \"deployments =", "the following snippet.\\n\" \"You may need to change the cluster name if it's", "environment.\\n\" \"----------------\\n\" \"from idact import show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\" \"deployments = cluster.pull_deployments()\\n\" \"nodes", "name if it's different in\" \" the target environment.\\n\" \"----------------\\n\" \"from idact import", "\"\\nTo access the allocation and notebook deployments from cluster,\" \" you can use", "( \"\\nTo access the allocation and notebook deployments from cluster,\" \" you can", "import show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\" \"deployments = cluster.pull_deployments()\\n\" \"nodes = deployments.nodes[-1]\\n\" \"nb =", "from idact.detail.jupyter_app.format_deployments_info import \\ format_deployments_info def test_format_deployments_info(): formatted = format_deployments_info(cluster_name='cluster1') assert formatted 
==", "the allocation and notebook deployments from cluster,\" \" you can use the following", "use the following snippet.\\n\" \"You may need to change the cluster name if", "access the allocation and notebook deployments from cluster,\" \" you can use the", "may need to change the cluster name if it's different in\" \" the", "= format_deployments_info(cluster_name='cluster1') assert formatted == ( \"\\nTo access the allocation and notebook deployments", "\" the target environment.\\n\" \"----------------\\n\" \"from idact import show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\" \"deployments", "following snippet.\\n\" \"You may need to change the cluster name if it's different", "the cluster name if it's different in\" \" the target environment.\\n\" \"----------------\\n\" \"from", "import \\ format_deployments_info def test_format_deployments_info(): formatted = format_deployments_info(cluster_name='cluster1') assert formatted == ( \"\\nTo", "to change the cluster name if it's different in\" \" the target environment.\\n\"", "\"cluster = show_cluster('cluster1')\\n\" \"deployments = cluster.pull_deployments()\\n\" \"nodes = deployments.nodes[-1]\\n\" \"nb = deployments.jupyter_deployments[-1]\\n\" \"----------------\")", "need to change the cluster name if it's different in\" \" the target", "format_deployments_info(cluster_name='cluster1') assert formatted == ( \"\\nTo access the allocation and notebook deployments from", "\" you can use the following snippet.\\n\" \"You may need to change the", "cluster name if it's different in\" \" the target environment.\\n\" \"----------------\\n\" \"from idact", "assert formatted == ( \"\\nTo access the allocation and notebook deployments from cluster,\"", "cluster,\" \" you can use the following snippet.\\n\" \"You may need to change", "show_cluster\\n\" \"cluster = show_cluster('cluster1')\\n\" \"deployments = cluster.pull_deployments()\\n\" \"nodes = deployments.nodes[-1]\\n\" \"nb = 
deployments.jupyter_deployments[-1]\\n\"", "test_format_deployments_info(): formatted = format_deployments_info(cluster_name='cluster1') assert formatted == ( \"\\nTo access the allocation and", "formatted = format_deployments_info(cluster_name='cluster1') assert formatted == ( \"\\nTo access the allocation and notebook" ]
[ "try: e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing on the image black_image = Image.new('1',", "drawblack.text((10, 120), 'PMS 10 value =', font = font28, fill = 0) drawred.text((210,", "40), str(values.pm10_cf1), font = font28, fill = 0) drawred.text((210, 80), str(values.pm25_cf1), font =", "Drawing on the Horizontal image horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126", "Image.new('1', (e_paper.width, e_paper.height), 255) # font28 = ImageFont.truetype(('images/Font.ttc'), 28) font18 = ImageFont.truetype(('images/Font.ttc'), 18)", "drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill = 0) e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image)) time.sleep(4) e_paper.Clear_screen() #e_paper.exit() except", "ImageFont.truetype(('images/Font.ttc'), 28) font18 = ImageFont.truetype(('images/Font.ttc'), 18) # Drawing on the Horizontal image horizontal_black_image", "value = ', font = font28, fill = 0) drawblack.text((10, 80), 'PMS 2.5", "255) # 298*126 horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 values =", "# Red color e-paper import sys import os import lib_2inch7_ec_paper import time from", "= ', font = font28, fill = 0) drawblack.text((10, 120), 'PMS 10 value", "font = font28, fill = 0) e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image)) time.sleep(4) e_paper.Clear_screen() #e_paper.exit() except KeyboardInterrupt: epd_2in7_color_air.e_paperconfig.module_exit()", "1 value is {}\".format(values.pm10_cf1)) print(\"PMS 2.5 value is {}\".format(values.pm25_cf1)) print(\"PMS 10 value is", "os import lib_2inch7_ec_paper import time from PIL import Image,ImageDraw,ImageFont from pms_a003 import Sensor", "Drawing on the image black_image = Image.new('1', (e_paper.width, e_paper.height), 255) # 255: clear", "drawblack.text((10, 80), 'PMS 2.5 value = ', font = font28, fill = 0)", 
"120), 'PMS 10 value =', font = font28, fill = 0) drawred.text((210, 40),", "= ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR MONITORING', font = font28, fill = 0) drawblack.text((10,", "image black_image = Image.new('1', (e_paper.width, e_paper.height), 255) # 255: clear the frame red_image", "{}\".format(values.pm25_cf1)) print(\"PMS 10 value is {}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image) drawred = ImageDraw.Draw(horizontal_red_image) drawred.text((10,", "MONITORING', font = font28, fill = 0) drawblack.text((10, 40), 'PMS 1 value =", "drawred = ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR MONITORING', font = font28, fill = 0)", "value = ', font = font28, fill = 0) drawblack.text((10, 120), 'PMS 10", "(e_paper.height, e_paper.width), 255) # 298*126 values = air_mon.read() print(\"PMS 1 value is {}\".format(values.pm10_cf1))", "is {}\".format(values.pm25_cf1)) print(\"PMS 10 value is {}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image) drawred = ImageDraw.Draw(horizontal_red_image)", "= 0) drawblack.text((10, 120), 'PMS 10 value =', font = font28, fill =", "font18 = ImageFont.truetype(('images/Font.ttc'), 18) # Drawing on the Horizontal image horizontal_black_image = Image.new('1',", "= 0) drawred.text((210, 40), str(values.pm10_cf1), font = font28, fill = 0) drawred.text((210, 80),", "air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True: try: e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init() #", "lib_2inch7_ec_paper import time from PIL import Image,ImageDraw,ImageFont from pms_a003 import Sensor air_mon =", "import os import lib_2inch7_ec_paper import time from PIL import Image,ImageDraw,ImageFont from pms_a003 import", "= 0) drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill = 0) e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image)) time.sleep(4) 
e_paper.Clear_screen()", "Horizontal image horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 horizontal_red_image = Image.new('1',", "value is {}\".format(values.pm10_cf1)) print(\"PMS 2.5 value is {}\".format(values.pm25_cf1)) print(\"PMS 10 value is {}\".format(values.pm100_cf1))", "2.5 value = ', font = font28, fill = 0) drawblack.text((10, 120), 'PMS", "# 298*126 horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 values = air_mon.read()", "0) drawblack.text((10, 80), 'PMS 2.5 value = ', font = font28, fill =", "font28, fill = 0) drawred.text((210, 40), str(values.pm10_cf1), font = font28, fill = 0)", "drawred.text((210, 40), str(values.pm10_cf1), font = font28, fill = 0) drawred.text((210, 80), str(values.pm25_cf1), font", "# 298*126 values = air_mon.read() print(\"PMS 1 value is {}\".format(values.pm10_cf1)) print(\"PMS 2.5 value", "is {}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image) drawred = ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR MONITORING', font", "on the image black_image = Image.new('1', (e_paper.width, e_paper.height), 255) # 255: clear the", "black_image = Image.new('1', (e_paper.width, e_paper.height), 255) # 255: clear the frame red_image =", "font = font28, fill = 0) drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill =", "80), 'PMS 2.5 value = ', font = font28, fill = 0) drawblack.text((10,", "e-paper import sys import os import lib_2inch7_ec_paper import time from PIL import Image,ImageDraw,ImageFont", "Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255) #", "= font28, fill = 0) drawred.text((210, 40), str(values.pm10_cf1), font = font28, fill =", "fill = 0) drawblack.text((10, 40), 'PMS 1 value = ', font = font28,", "{}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image) drawred = 
ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR MONITORING', font =", "the image black_image = Image.new('1', (e_paper.width, e_paper.height), 255) # 255: clear the frame", "40), 'PMS 1 value = ', font = font28, fill = 0) drawblack.text((10,", "values = air_mon.read() print(\"PMS 1 value is {}\".format(values.pm10_cf1)) print(\"PMS 2.5 value is {}\".format(values.pm25_cf1))", "import Image,ImageDraw,ImageFont from pms_a003 import Sensor air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True:", "Sensor air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True: try: e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init()", "is {}\".format(values.pm10_cf1)) print(\"PMS 2.5 value is {}\".format(values.pm25_cf1)) print(\"PMS 10 value is {}\".format(values.pm100_cf1)) drawblack", "fill = 0) drawblack.text((10, 120), 'PMS 10 value =', font = font28, fill", "ImageFont.truetype(('images/Font.ttc'), 18) # Drawing on the Horizontal image horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width),", "120),str(values.pm100_cf1), font = font28, fill = 0) e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image)) time.sleep(4) e_paper.Clear_screen() #e_paper.exit() except KeyboardInterrupt:", "font = font28, fill = 0) drawred.text((210, 80), str(values.pm25_cf1), font = font28, fill", "e_paper.width), 255) # 298*126 horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 values", "Image.new('1', (e_paper.width, e_paper.height), 255) # 255: clear the frame red_image = Image.new('1', (e_paper.width,", "= Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True: try: e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing", "value =', font = font28, fill = 0) drawred.text((210, 40), str(values.pm10_cf1), font =", "print(\"PMS 2.5 value is {}\".format(values.pm25_cf1)) print(\"PMS 10 value is 
{}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image)", "80), str(values.pm25_cf1), font = font28, fill = 0) drawred.text((210, 120),str(values.pm100_cf1), font = font28,", "drawred.text((10, 0), 'AIR MONITORING', font = font28, fill = 0) drawblack.text((10, 40), 'PMS", "from pms_a003 import Sensor air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True: try: e_paper", "clear the frame red_image = Image.new('1', (e_paper.width, e_paper.height), 255) # font28 = ImageFont.truetype(('images/Font.ttc'),", "Red color e-paper import sys import os import lib_2inch7_ec_paper import time from PIL", "= lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing on the image black_image = Image.new('1', (e_paper.width, e_paper.height),", "value is {}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image) drawred = ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR MONITORING',", "255) # font28 = ImageFont.truetype(('images/Font.ttc'), 28) font18 = ImageFont.truetype(('images/Font.ttc'), 18) # Drawing on", "from PIL import Image,ImageDraw,ImageFont from pms_a003 import Sensor air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600)", "10 value is {}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image) drawred = ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR", "drawblack = ImageDraw.Draw(horizontal_black_image) drawred = ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR MONITORING', font = font28,", "font28 = ImageFont.truetype(('images/Font.ttc'), 28) font18 = ImageFont.truetype(('images/Font.ttc'), 18) # Drawing on the Horizontal", "import lib_2inch7_ec_paper import time from PIL import Image,ImageDraw,ImageFont from pms_a003 import Sensor air_mon", "font = font28, fill = 0) drawred.text((210, 40), str(values.pm10_cf1), font = font28, fill", "255: clear the frame red_image = Image.new('1', (e_paper.width, 
e_paper.height), 255) # font28 =", "font = font28, fill = 0) drawblack.text((10, 40), 'PMS 1 value = ',", "PIL import Image,ImageDraw,ImageFont from pms_a003 import Sensor air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while", "print(\"PMS 10 value is {}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image) drawred = ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0),", "18) # Drawing on the Horizontal image horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255)", "red_image = Image.new('1', (e_paper.width, e_paper.height), 255) # font28 = ImageFont.truetype(('images/Font.ttc'), 28) font18 =", "'PMS 2.5 value = ', font = font28, fill = 0) drawblack.text((10, 120),", "fill = 0) drawblack.text((10, 80), 'PMS 2.5 value = ', font = font28,", "image horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 horizontal_red_image = Image.new('1', (e_paper.height,", "Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 values = air_mon.read() print(\"PMS 1 value is", "font28, fill = 0) drawred.text((210, 80), str(values.pm25_cf1), font = font28, fill = 0)", "frame red_image = Image.new('1', (e_paper.width, e_paper.height), 255) # font28 = ImageFont.truetype(('images/Font.ttc'), 28) font18", "# font28 = ImageFont.truetype(('images/Font.ttc'), 28) font18 = ImageFont.truetype(('images/Font.ttc'), 18) # Drawing on the", "(e_paper.width, e_paper.height), 255) # font28 = ImageFont.truetype(('images/Font.ttc'), 28) font18 = ImageFont.truetype(('images/Font.ttc'), 18) #", "font28, fill = 0) drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill = 0) e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image))", "font28, fill = 0) drawblack.text((10, 40), 'PMS 1 value = ', font =", "0) drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill = 0) 
e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image)) time.sleep(4) e_paper.Clear_screen() #e_paper.exit()", "= ', font = font28, fill = 0) drawblack.text((10, 80), 'PMS 2.5 value", "on the Horizontal image horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 horizontal_red_image", "(e_paper.width, e_paper.height), 255) # 255: clear the frame red_image = Image.new('1', (e_paper.width, e_paper.height),", "e_paper.height), 255) # 255: clear the frame red_image = Image.new('1', (e_paper.width, e_paper.height), 255)", "= font28, fill = 0) drawblack.text((10, 120), 'PMS 10 value =', font =", "298*126 values = air_mon.read() print(\"PMS 1 value is {}\".format(values.pm10_cf1)) print(\"PMS 2.5 value is", "str(values.pm25_cf1), font = font28, fill = 0) drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill", "ImageDraw.Draw(horizontal_black_image) drawred = ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR MONITORING', font = font28, fill =", "e_paper.width), 255) # 298*126 values = air_mon.read() print(\"PMS 1 value is {}\".format(values.pm10_cf1)) print(\"PMS", "air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True: try: e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing on the", "= Image.new('1', (e_paper.width, e_paper.height), 255) # 255: clear the frame red_image = Image.new('1',", "# Drawing on the image black_image = Image.new('1', (e_paper.width, e_paper.height), 255) # 255:", "import sys import os import lib_2inch7_ec_paper import time from PIL import Image,ImageDraw,ImageFont from", "= Image.new('1', (e_paper.width, e_paper.height), 255) # font28 = ImageFont.truetype(('images/Font.ttc'), 28) font18 = ImageFont.truetype(('images/Font.ttc'),", "= 0) drawblack.text((10, 80), 'PMS 2.5 value = ', font = font28, fill", "the Horizontal image horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 
horizontal_red_image =", "{}\".format(values.pm10_cf1)) print(\"PMS 2.5 value is {}\".format(values.pm25_cf1)) print(\"PMS 10 value is {}\".format(values.pm100_cf1)) drawblack =", "font = font28, fill = 0) drawblack.text((10, 120), 'PMS 10 value =', font", "0) drawred.text((210, 40), str(values.pm10_cf1), font = font28, fill = 0) drawred.text((210, 80), str(values.pm25_cf1),", "# Drawing on the Horizontal image horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255) #", "fill = 0) drawred.text((210, 80), str(values.pm25_cf1), font = font28, fill = 0) drawred.text((210,", "0), 'AIR MONITORING', font = font28, fill = 0) drawblack.text((10, 40), 'PMS 1", "pms_a003 import Sensor air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True: try: e_paper =", "= font28, fill = 0) drawred.text((210, 80), str(values.pm25_cf1), font = font28, fill =", "while True: try: e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing on the image black_image", "(e_paper.height, e_paper.width), 255) # 298*126 horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126", "Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True: try: e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing on", "255) # 255: clear the frame red_image = Image.new('1', (e_paper.width, e_paper.height), 255) #", "value is {}\".format(values.pm25_cf1)) print(\"PMS 10 value is {}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image) drawred =", "the frame red_image = Image.new('1', (e_paper.width, e_paper.height), 255) # font28 = ImageFont.truetype(('images/Font.ttc'), 28)", "'PMS 1 value = ', font = font28, fill = 0) drawblack.text((10, 80),", "10 value =', font = font28, fill = 0) drawred.text((210, 40), str(values.pm10_cf1), font", "sys import os import lib_2inch7_ec_paper import time from PIL import Image,ImageDraw,ImageFont from pms_a003", "= font28, fill = 0) 
drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill = 0)", "= 0) drawblack.text((10, 40), 'PMS 1 value = ', font = font28, fill", "drawblack.text((10, 40), 'PMS 1 value = ', font = font28, fill = 0)", "ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR MONITORING', font = font28, fill = 0) drawblack.text((10, 40),", "baudrate=9600) while True: try: e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing on the image", "= ImageDraw.Draw(horizontal_black_image) drawred = ImageDraw.Draw(horizontal_red_image) drawred.text((10, 0), 'AIR MONITORING', font = font28, fill", "= font28, fill = 0) e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image)) time.sleep(4) e_paper.Clear_screen() #e_paper.exit() except KeyboardInterrupt: epd_2in7_color_air.e_paperconfig.module_exit() exit()", "fill = 0) drawred.text((210, 40), str(values.pm10_cf1), font = font28, fill = 0) drawred.text((210,", "horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 values = air_mon.read() print(\"PMS 1", "e_paper.init() # Drawing on the image black_image = Image.new('1', (e_paper.width, e_paper.height), 255) #", "import time from PIL import Image,ImageDraw,ImageFont from pms_a003 import Sensor air_mon = Sensor()", "time from PIL import Image,ImageDraw,ImageFont from pms_a003 import Sensor air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\",", "# 255: clear the frame red_image = Image.new('1', (e_paper.width, e_paper.height), 255) # font28", "= Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 values = air_mon.read() print(\"PMS 1 value", "0) drawred.text((210, 80), str(values.pm25_cf1), font = font28, fill = 0) drawred.text((210, 120),str(values.pm100_cf1), font", "horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width),", "font28, fill = 0) drawblack.text((10, 120), 'PMS 10 
value =', font = font28,", "Image,ImageDraw,ImageFont from pms_a003 import Sensor air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True: try:", "0) drawblack.text((10, 40), 'PMS 1 value = ', font = font28, fill =", "fill = 0) drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill = 0) e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image)) time.sleep(4)", "= Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255)", "drawred.text((210, 80), str(values.pm25_cf1), font = font28, fill = 0) drawred.text((210, 120),str(values.pm100_cf1), font =", "2.5 value is {}\".format(values.pm25_cf1)) print(\"PMS 10 value is {}\".format(values.pm100_cf1)) drawblack = ImageDraw.Draw(horizontal_black_image) drawred", "=', font = font28, fill = 0) drawred.text((210, 40), str(values.pm10_cf1), font = font28,", "= 0) drawred.text((210, 80), str(values.pm25_cf1), font = font28, fill = 0) drawred.text((210, 120),str(values.pm100_cf1),", "= font28, fill = 0) drawblack.text((10, 80), 'PMS 2.5 value = ', font", "'AIR MONITORING', font = font28, fill = 0) drawblack.text((10, 40), 'PMS 1 value", "0) drawblack.text((10, 120), 'PMS 10 value =', font = font28, fill = 0)", "str(values.pm10_cf1), font = font28, fill = 0) drawred.text((210, 80), str(values.pm25_cf1), font = font28,", "True: try: e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing on the image black_image =", "= ImageFont.truetype(('images/Font.ttc'), 28) font18 = ImageFont.truetype(('images/Font.ttc'), 18) # Drawing on the Horizontal image", "= air_mon.read() print(\"PMS 1 value is {}\".format(values.pm10_cf1)) print(\"PMS 2.5 value is {}\".format(values.pm25_cf1)) print(\"PMS", "298*126 horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126 values = air_mon.read() print(\"PMS", "= ImageFont.truetype(('images/Font.ttc'), 18) # 
Drawing on the Horizontal image horizontal_black_image = Image.new('1', (e_paper.height,", "1 value = ', font = font28, fill = 0) drawblack.text((10, 80), 'PMS", "= font28, fill = 0) drawblack.text((10, 40), 'PMS 1 value = ', font", "'PMS 10 value =', font = font28, fill = 0) drawred.text((210, 40), str(values.pm10_cf1),", "import Sensor air_mon = Sensor() air_mon.connect_hat(port=\"/dev/ttyS0\", baudrate=9600) while True: try: e_paper = lib_2inch7_ec_paper.Ec_Paper()", "28) font18 = ImageFont.truetype(('images/Font.ttc'), 18) # Drawing on the Horizontal image horizontal_black_image =", "255) # 298*126 values = air_mon.read() print(\"PMS 1 value is {}\".format(values.pm10_cf1)) print(\"PMS 2.5", "e_paper = lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing on the image black_image = Image.new('1', (e_paper.width,", "e_paper.height), 255) # font28 = ImageFont.truetype(('images/Font.ttc'), 28) font18 = ImageFont.truetype(('images/Font.ttc'), 18) # Drawing", "print(\"PMS 1 value is {}\".format(values.pm10_cf1)) print(\"PMS 2.5 value is {}\".format(values.pm25_cf1)) print(\"PMS 10 value", "air_mon.read() print(\"PMS 1 value is {}\".format(values.pm10_cf1)) print(\"PMS 2.5 value is {}\".format(values.pm25_cf1)) print(\"PMS 10", "color e-paper import sys import os import lib_2inch7_ec_paper import time from PIL import", "', font = font28, fill = 0) drawblack.text((10, 80), 'PMS 2.5 value =", "font28, fill = 0) drawblack.text((10, 80), 'PMS 2.5 value = ', font =", "lib_2inch7_ec_paper.Ec_Paper() e_paper.init() # Drawing on the image black_image = Image.new('1', (e_paper.width, e_paper.height), 255)", "', font = font28, fill = 0) drawblack.text((10, 120), 'PMS 10 value =',", "font = font28, fill = 0) drawblack.text((10, 80), 'PMS 2.5 value = '," ]
[ "<gh_stars>1-10 #!/Users/davk/anaconda/envs/platformio_setup python import subprocess revision = subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).strip() print('-DPIO_SRC_REV=\"%s\"' % revision)" ]
[ "request.add_header('Authorization', 'APPCODE ' + appcode) response = urllib.request.urlopen(request) content = response.read() if (content):", "pprint import pprint meta = { \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\", \"data\":[\"\"]", "-*- coding:utf8 -*- import requests import lxml.etree import json import pickle,traceback,shelve,time,sys __title__ =", "修正了数据尚未更新的404错误,返回空列表 \"\"\" class ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata): self.metadata = metadata def getInfo(self,", "if \"中英字幕\" in x.text: rlist.append(x.text) return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request host", "'number=' + number + '&type=' + type_ bodys = {} url = host", "return [],[],1 result = json.loads(result) ilist = result[\"result\"][\"list\"] company = result[\"result\"][\"type\"] issign =", "1: break if code == 0: return [],[],1 result = json.loads(result) ilist =", "company = result[\"result\"][\"type\"] issign = result[\"result\"][\"issign\"] wlist = [] plist = [] p2list", "+item.split(\"::::::\")[1] + \" | %s:%s\"%(company,meta.info)) if len(p2list) > 0: p2list = p2list[0] if", "json.loads(result) ilist = result[\"result\"][\"list\"] company = result[\"result\"][\"type\"] issign = result[\"result\"][\"issign\"] wlist = []", "\"__main__\": from pprint import pprint meta = { \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\",", "= metadata def getInfo(self, rss=\"\"): '''从网络API获取信息''' response = requests.get(rss) content = response.content xml", "def getInfo(self, rss=\"\"): '''从网络API获取信息''' response = requests.get(rss) content = response.content xml = lxml.etree.XML(content)", "= [] for x in clist: if \"中英字幕\" in x.text: rlist.append(x.text) return rlist", "= [] plist = [] p2list = [] for item in ilist: suminfo", "import lxml.etree import json import pickle,traceback,shelve,time,sys __title__ = 
\"快递更新查询程序\" __version__ = '0.0.2' __log__", "content = response.read() if (content): dict = content.decode('utf-8','ignore') return 1,'查询成功',dict else: return 0,'错误,未返回数据',''", "== 0: return [],[],1 result = json.loads(result) ilist = result[\"result\"][\"list\"] company = result[\"result\"][\"type\"]", "in clist: if \"中英字幕\" in x.text: rlist.append(x.text) return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import", "result = json.loads(result) ilist = result[\"result\"][\"list\"] company = result[\"result\"][\"type\"] issign = result[\"result\"][\"issign\"] wlist", "for item in ilist: suminfo = str(item[\"time\"] + \"::::::\" + item[\"status\"]).strip() wlist.append(suminfo) for", "if not item in meta.data: plist.append(item) for item in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name +", "= [] for item in ilist: suminfo = str(item[\"time\"] + \"::::::\" + item[\"status\"]).strip()", "if code == 0: return [],[],1 result = json.loads(result) ilist = result[\"result\"][\"list\"] company", "result[\"result\"][\"issign\"] wlist = [] plist = [] p2list = [] for item in", "item in meta.data: plist.append(item) for item in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \"", "import requests import lxml.etree import json import pickle,traceback,shelve,time,sys __title__ = \"快递更新查询程序\" __version__ =", "def __init__(self,metadata): self.metadata = metadata def getInfo(self, rss=\"\"): '''从网络API获取信息''' response = requests.get(rss) content", "+ '&type=' + type_ bodys = {} url = host + path +", "response = urllib.request.urlopen(request) content = response.read() if (content): dict = content.decode('utf-8','ignore') return 1,'查询成功',dict", "= content.decode('utf-8','ignore') return 1,'查询成功',dict else: return 0,'错误,未返回数据','' except: return 0, \"错误,未返回数据\", \"\" def", "= host + path + '?' 
+ querys request = urllib.request.Request(url) request.add_header('Authorization', 'APPCODE", "= \"快递更新查询程序\" __version__ = '0.0.2' __log__ = \"\"\" 0.0.1 2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表", "result[\"result\"][\"list\"] company = result[\"result\"][\"type\"] issign = result[\"result\"][\"issign\"] wlist = [] plist = []", "ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata): self.metadata = metadata def getInfo(self, rss=\"\"): '''从网络API获取信息''' response", "str(item[\"time\"] + \"::::::\" + item[\"status\"]).strip() wlist.append(suminfo) for item in wlist: if not item", "= xml.xpath('//channel/item/title') rlist = [] for x in clist: if \"中英字幕\" in x.text:", "except: return 0, \"错误,未返回数据\", \"\" def checkData(self,meta): import json for x in range(3):", "+ item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1] + \" | %s:%s\"%(company,meta.info)) if len(p2list) > 0:", "else 1 if __name__ == \"__main__\": from pprint import pprint meta = {", "+ '?' + querys request = urllib.request.Request(url) request.add_header('Authorization', 'APPCODE ' + appcode) response", "+ appcode) response = urllib.request.urlopen(request) content = response.read() if (content): dict = content.decode('utf-8','ignore')", "__version__ = '0.0.2' __log__ = \"\"\" 0.0.1 2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" class", "= {} url = host + path + '?' + querys request =", "suminfo = str(item[\"time\"] + \"::::::\" + item[\"status\"]).strip() wlist.append(suminfo) for item in wlist: if", "{} url = host + path + '?' 
+ querys request = urllib.request.Request(url)", "= result[\"result\"][\"issign\"] wlist = [] plist = [] p2list = [] for item", "plist = [] p2list = [] for item in ilist: suminfo = str(item[\"time\"]", "item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1] + \" | %s:%s\"%(company,meta.info)) if len(p2list) > 0: p2list", "\"快递更新查询程序\" __version__ = '0.0.2' __log__ = \"\"\" 0.0.1 2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\"", "import pickle,traceback,shelve,time,sys __title__ = \"快递更新查询程序\" __version__ = '0.0.2' __log__ = \"\"\" 0.0.1 2018年3月4日", "in range(3): code,_,result = self.checkExpress(number=str(meta.info).strip()) if code == 1: break if code ==", "\"\"\" 0.0.1 2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" class ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata):", "break if code == 0: return [],[],1 result = json.loads(result) ilist = result[\"result\"][\"list\"]", "__init__(self,metadata): self.metadata = metadata def getInfo(self, rss=\"\"): '''从网络API获取信息''' response = requests.get(rss) content =", "metadata def getInfo(self, rss=\"\"): '''从网络API获取信息''' response = requests.get(rss) content = response.content xml =", "\"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\", \"data\":[\"\"] } checker = ExpressChecker(metadata=meta) a,b,c = checker.checkData(meta=meta) print(a,b,c)", "= response.content xml = lxml.etree.XML(content) clist = xml.xpath('//channel/item/title') rlist = [] for x", "try: import urllib.request host = 'http://jisukdcx.market.alicloudapi.com' path = '/express/query' method = 'GET' appcode", "[],[],1 result = json.loads(result) ilist = result[\"result\"][\"list\"] company = result[\"result\"][\"type\"] issign = result[\"result\"][\"issign\"]", "+ number + '&type=' + type_ bodys = {} url = host +", "\"\"\" class ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata): self.metadata = metadata def getInfo(self, rss=\"\"):", "0, \"错误,未返回数据\", \"\" def 
checkData(self,meta): import json for x in range(3): code,_,result =", "url = host + path + '?' + querys request = urllib.request.Request(url) request.add_header('Authorization',", "content = response.content xml = lxml.etree.XML(content) clist = xml.xpath('//channel/item/title') rlist = [] for", "if len(p2list) > 0: p2list = p2list[0] if isinstance(p2list,str): p2list = [p2list] #因为快递只需要更新最近状态即可", "xml = lxml.etree.XML(content) clist = xml.xpath('//channel/item/title') rlist = [] for x in clist:", "wlist,p2list,0 if issign == \"1\" else 1 if __name__ == \"__main__\": from pprint", "for x in clist: if \"中英字幕\" in x.text: rlist.append(x.text) return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'):", "== 1: break if code == 0: return [],[],1 result = json.loads(result) ilist", "for x in range(3): code,_,result = self.checkExpress(number=str(meta.info).strip()) if code == 1: break if", "pickle,traceback,shelve,time,sys __title__ = \"快递更新查询程序\" __version__ = '0.0.2' __log__ = \"\"\" 0.0.1 2018年3月4日 0.0.2", "2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" class ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata): self.metadata =", "1,'查询成功',dict else: return 0,'错误,未返回数据','' except: return 0, \"错误,未返回数据\", \"\" def checkData(self,meta): import json", "\"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\", \"data\":[\"\"] } checker = ExpressChecker(metadata=meta) a,b,c = checker.checkData(meta=meta)", "+ type_ bodys = {} url = host + path + '?' 
+", "in meta.data: plist.append(item) for item in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \" \"", "2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" class ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata): self.metadata = metadata def", "'http://jisukdcx.market.alicloudapi.com' path = '/express/query' method = 'GET' appcode = appcode querys = 'number='", "(content): dict = content.decode('utf-8','ignore') return 1,'查询成功',dict else: return 0,'错误,未返回数据','' except: return 0, \"错误,未返回数据\",", "meta.data: plist.append(item) for item in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1]", "return 0, \"错误,未返回数据\", \"\" def checkData(self,meta): import json for x in range(3): code,_,result", "'APPCODE ' + appcode) response = urllib.request.urlopen(request) content = response.read() if (content): dict", "wlist = [] plist = [] p2list = [] for item in ilist:", "return 1,'查询成功',dict else: return 0,'错误,未返回数据','' except: return 0, \"错误,未返回数据\", \"\" def checkData(self,meta): import", "x in range(3): code,_,result = self.checkExpress(number=str(meta.info).strip()) if code == 1: break if code", "\"1\" else 1 if __name__ == \"__main__\": from pprint import pprint meta =", "__log__ = \"\"\" 0.0.1 2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" class ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\"", "\"中英字幕\" in x.text: rlist.append(x.text) return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request host =", "pprint meta = { \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\", \"data\":[\"\"] } checker", "0: p2list = p2list[0] if isinstance(p2list,str): p2list = [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0", "__title__ = \"快递更新查询程序\" __version__ = '0.0.2' __log__ = \"\"\" 0.0.1 2018年3月4日 0.0.2 2018-03-07", "= 'GET' appcode = appcode querys 
= 'number=' + number + '&type=' +", "#因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0 if issign == \"1\" else 1 if __name__ ==", "+ item[\"status\"]).strip() wlist.append(suminfo) for item in wlist: if not item in meta.data: plist.append(item)", "xml.xpath('//channel/item/title') rlist = [] for x in clist: if \"中英字幕\" in x.text: rlist.append(x.text)", "json import pickle,traceback,shelve,time,sys __title__ = \"快递更新查询程序\" __version__ = '0.0.2' __log__ = \"\"\" 0.0.1", "urllib.request.urlopen(request) content = response.read() if (content): dict = content.decode('utf-8','ignore') return 1,'查询成功',dict else: return", "= { \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\", \"data\":[\"\"] } checker = ExpressChecker(metadata=meta)", "host + path + '?' + querys request = urllib.request.Request(url) request.add_header('Authorization', 'APPCODE '", "1 if __name__ == \"__main__\": from pprint import pprint meta = { \"name\":\"孔夫子书籍\",", "= [] p2list = [] for item in ilist: suminfo = str(item[\"time\"] +", "meta = { \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\", \"data\":[\"\"] } checker =", "\"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\", \"data\":[\"\"] } checker = ExpressChecker(metadata=meta) a,b,c =", "not item in meta.data: plist.append(item) for item in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+", "wlist: if not item in meta.data: plist.append(item) for item in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name", "'''从网络API获取信息''' response = requests.get(rss) content = response.content xml = lxml.etree.XML(content) clist = xml.xpath('//channel/item/title')", "json for x in range(3): code,_,result = self.checkExpress(number=str(meta.info).strip()) if code == 1: break", "\"\" def checkData(self,meta): import json for x in range(3): code,_,result 
= self.checkExpress(number=str(meta.info).strip()) if", "checkData(self,meta): import json for x in range(3): code,_,result = self.checkExpress(number=str(meta.info).strip()) if code ==", "= [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0 if issign == \"1\" else 1 if", "#/usr/bin/env python3 # -*- coding:utf8 -*- import requests import lxml.etree import json import", "%s:%s\"%(company,meta.info)) if len(p2list) > 0: p2list = p2list[0] if isinstance(p2list,str): p2list = [p2list]", "self.checkExpress(number=str(meta.info).strip()) if code == 1: break if code == 0: return [],[],1 result", "= '0.0.2' __log__ = \"\"\" 0.0.1 2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" class ExpressChecker:", "request = urllib.request.Request(url) request.add_header('Authorization', 'APPCODE ' + appcode) response = urllib.request.urlopen(request) content =", "= response.read() if (content): dict = content.decode('utf-8','ignore') return 1,'查询成功',dict else: return 0,'错误,未返回数据','' except:", "== \"__main__\": from pprint import pprint meta = { \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30,", "[] for x in clist: if \"中英字幕\" in x.text: rlist.append(x.text) return rlist def", "= p2list[0] if isinstance(p2list,str): p2list = [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0 if issign", "'?' + querys request = urllib.request.Request(url) request.add_header('Authorization', 'APPCODE ' + appcode) response =", "method = 'GET' appcode = appcode querys = 'number=' + number + '&type='", "querys request = urllib.request.Request(url) request.add_header('Authorization', 'APPCODE ' + appcode) response = urllib.request.urlopen(request) content", "if issign == \"1\" else 1 if __name__ == \"__main__\": from pprint import", "requests import lxml.etree import json import pickle,traceback,shelve,time,sys __title__ = \"快递更新查询程序\" __version__ = '0.0.2'", "+ path + '?' 
+ querys request = urllib.request.Request(url) request.add_header('Authorization', 'APPCODE ' +", "\"错误,未返回数据\", \"\" def checkData(self,meta): import json for x in range(3): code,_,result = self.checkExpress(number=str(meta.info).strip())", "0: return [],[],1 result = json.loads(result) ilist = result[\"result\"][\"list\"] company = result[\"result\"][\"type\"] issign", "p2list = [] for item in ilist: suminfo = str(item[\"time\"] + \"::::::\" +", "for item in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1] + \"", "import pprint meta = { \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\", \"data\":[\"\"] }", "class ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata): self.metadata = metadata def getInfo(self, rss=\"\"): '''从网络API获取信息'''", "rlist = [] for x in clist: if \"中英字幕\" in x.text: rlist.append(x.text) return", "\"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata): self.metadata = metadata def getInfo(self, rss=\"\"): '''从网络API获取信息''' response =", "= '/express/query' method = 'GET' appcode = appcode querys = 'number=' + number", "p2list[0] if isinstance(p2list,str): p2list = [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0 if issign ==", "'GET' appcode = appcode querys = 'number=' + number + '&type=' + type_", "x in clist: if \"中英字幕\" in x.text: rlist.append(x.text) return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try:", "if (content): dict = content.decode('utf-8','ignore') return 1,'查询成功',dict else: return 0,'错误,未返回数据','' except: return 0,", "plist.append(item) for item in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1] +", "wlist.append(suminfo) for item in wlist: if not item in meta.data: plist.append(item) for item", "== \"1\" else 1 if __name__ == \"__main__\": from pprint import pprint 
meta", "appcode) response = urllib.request.urlopen(request) content = response.read() if (content): dict = content.decode('utf-8','ignore') return", "rlist.append(x.text) return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request host = 'http://jisukdcx.market.alicloudapi.com' path =", "[] plist = [] p2list = [] for item in ilist: suminfo =", "x.text: rlist.append(x.text) return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request host = 'http://jisukdcx.market.alicloudapi.com' path", "bodys = {} url = host + path + '?' + querys request", "\" \" +item.split(\"::::::\")[1] + \" | %s:%s\"%(company,meta.info)) if len(p2list) > 0: p2list =", "= urllib.request.Request(url) request.add_header('Authorization', 'APPCODE ' + appcode) response = urllib.request.urlopen(request) content = response.read()", "getInfo(self, rss=\"\"): '''从网络API获取信息''' response = requests.get(rss) content = response.content xml = lxml.etree.XML(content) clist", "isinstance(p2list,str): p2list = [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0 if issign == \"1\" else", "必须返回数组,因为需要遍历 return wlist,p2list,0 if issign == \"1\" else 1 if __name__ == \"__main__\":", "<gh_stars>1-10 #/usr/bin/env python3 # -*- coding:utf8 -*- import requests import lxml.etree import json", "= requests.get(rss) content = response.content xml = lxml.etree.XML(content) clist = xml.xpath('//channel/item/title') rlist =", "response.read() if (content): dict = content.decode('utf-8','ignore') return 1,'查询成功',dict else: return 0,'错误,未返回数据','' except: return", "coding:utf8 -*- import requests import lxml.etree import json import pickle,traceback,shelve,time,sys __title__ = \"快递更新查询程序\"", "content.decode('utf-8','ignore') return 1,'查询成功',dict else: return 0,'错误,未返回数据','' except: return 0, \"错误,未返回数据\", \"\" def checkData(self,meta):", "= result[\"result\"][\"list\"] company = result[\"result\"][\"type\"] 
issign = result[\"result\"][\"issign\"] wlist = [] plist =", "dict = content.decode('utf-8','ignore') return 1,'查询成功',dict else: return 0,'错误,未返回数据','' except: return 0, \"错误,未返回数据\", \"\"", "issign == \"1\" else 1 if __name__ == \"__main__\": from pprint import pprint", "{ \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\", \"data\":[\"\"] } checker = ExpressChecker(metadata=meta) a,b,c", "plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1] + \" | %s:%s\"%(company,meta.info)) if", "-*- import requests import lxml.etree import json import pickle,traceback,shelve,time,sys __title__ = \"快递更新查询程序\" __version__", "import json for x in range(3): code,_,result = self.checkExpress(number=str(meta.info).strip()) if code == 1:", "\"\"\" def __init__(self,metadata): self.metadata = metadata def getInfo(self, rss=\"\"): '''从网络API获取信息''' response = requests.get(rss)", "return wlist,p2list,0 if issign == \"1\" else 1 if __name__ == \"__main__\": from", "'/express/query' method = 'GET' appcode = appcode querys = 'number=' + number +", "def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request host = 'http://jisukdcx.market.alicloudapi.com' path = '/express/query' method =", "for item in wlist: if not item in meta.data: plist.append(item) for item in", "p2list = [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0 if issign == \"1\" else 1", "0,'错误,未返回数据','' except: return 0, \"错误,未返回数据\", \"\" def checkData(self,meta): import json for x in", "range(3): code,_,result = self.checkExpress(number=str(meta.info).strip()) if code == 1: break if code == 0:", "if code == 1: break if code == 0: return [],[],1 result =", "= result[\"result\"][\"type\"] issign = result[\"result\"][\"issign\"] wlist = [] plist = [] p2list =", "= \"\"\" 0.0.1 2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" class 
ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def", "appcode = appcode querys = 'number=' + number + '&type=' + type_ bodys", "in ilist: suminfo = str(item[\"time\"] + \"::::::\" + item[\"status\"]).strip() wlist.append(suminfo) for item in", "0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" class ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata): self.metadata = metadata", "> 0: p2list = p2list[0] if isinstance(p2list,str): p2list = [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return", "= str(item[\"time\"] + \"::::::\" + item[\"status\"]).strip() wlist.append(suminfo) for item in wlist: if not", "urllib.request host = 'http://jisukdcx.market.alicloudapi.com' path = '/express/query' method = 'GET' appcode = appcode", "= 'http://jisukdcx.market.alicloudapi.com' path = '/express/query' method = 'GET' appcode = appcode querys =", "= lxml.etree.XML(content) clist = xml.xpath('//channel/item/title') rlist = [] for x in clist: if", "if __name__ == \"__main__\": from pprint import pprint meta = { \"name\":\"孔夫子书籍\", \"id\":24322234,", "\" +item.split(\"::::::\")[1] + \" | %s:%s\"%(company,meta.info)) if len(p2list) > 0: p2list = p2list[0]", "code == 0: return [],[],1 result = json.loads(result) ilist = result[\"result\"][\"list\"] company =", "import json import pickle,traceback,shelve,time,sys __title__ = \"快递更新查询程序\" __version__ = '0.0.2' __log__ = \"\"\"", "return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request host = 'http://jisukdcx.market.alicloudapi.com' path = '/express/query'", "path + '?' 
+ querys request = urllib.request.Request(url) request.add_header('Authorization', 'APPCODE ' + appcode)", "response.content xml = lxml.etree.XML(content) clist = xml.xpath('//channel/item/title') rlist = [] for x in", "' + appcode) response = urllib.request.urlopen(request) content = response.read() if (content): dict =", "__name__ == \"__main__\": from pprint import pprint meta = { \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1,", "querys = 'number=' + number + '&type=' + type_ bodys = {} url", "ilist: suminfo = str(item[\"time\"] + \"::::::\" + item[\"status\"]).strip() wlist.append(suminfo) for item in wlist:", "= 'number=' + number + '&type=' + type_ bodys = {} url =", "= json.loads(result) ilist = result[\"result\"][\"list\"] company = result[\"result\"][\"type\"] issign = result[\"result\"][\"issign\"] wlist =", "+ \"::::::\" + item[\"status\"]).strip() wlist.append(suminfo) for item in wlist: if not item in", "clist = xml.xpath('//channel/item/title') rlist = [] for x in clist: if \"中英字幕\" in", "'0.0.2' __log__ = \"\"\" 0.0.1 2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" class ExpressChecker: \"\"\"检查快递更新状态的类", "return 0,'错误,未返回数据','' except: return 0, \"错误,未返回数据\", \"\" def checkData(self,meta): import json for x", "[] for item in ilist: suminfo = str(item[\"time\"] + \"::::::\" + item[\"status\"]).strip() wlist.append(suminfo)", "def checkData(self,meta): import json for x in range(3): code,_,result = self.checkExpress(number=str(meta.info).strip()) if code", "[] p2list = [] for item in ilist: suminfo = str(item[\"time\"] + \"::::::\"", "in wlist: if not item in meta.data: plist.append(item) for item in plist: p2list.append(\"[快递状态更新]\"+\"", "= appcode querys = 'number=' + number + '&type=' + type_ bodys =", "appcode querys = 'number=' + number + '&type=' + type_ bodys = {}", "\" | %s:%s\"%(company,meta.info)) if len(p2list) > 0: p2list = p2list[0] if isinstance(p2list,str): p2list", "number + '&type=' + type_ bodys = {} url = host + path", 
"rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request host = 'http://jisukdcx.market.alicloudapi.com' path = '/express/query' method", "host = 'http://jisukdcx.market.alicloudapi.com' path = '/express/query' method = 'GET' appcode = appcode querys", "ilist = result[\"result\"][\"list\"] company = result[\"result\"][\"type\"] issign = result[\"result\"][\"issign\"] wlist = [] plist", "item in ilist: suminfo = str(item[\"time\"] + \"::::::\" + item[\"status\"]).strip() wlist.append(suminfo) for item", "# -*- coding:utf8 -*- import requests import lxml.etree import json import pickle,traceback,shelve,time,sys __title__", "lxml.etree import json import pickle,traceback,shelve,time,sys __title__ = \"快递更新查询程序\" __version__ = '0.0.2' __log__ =", "checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request host = 'http://jisukdcx.market.alicloudapi.com' path = '/express/query' method = 'GET'", "= urllib.request.urlopen(request) content = response.read() if (content): dict = content.decode('utf-8','ignore') return 1,'查询成功',dict else:", "in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1] + \" | %s:%s\"%(company,meta.info))", "p2list = p2list[0] if isinstance(p2list,str): p2list = [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0 if", "else: return 0,'错误,未返回数据','' except: return 0, \"错误,未返回数据\", \"\" def checkData(self,meta): import json for", "| %s:%s\"%(company,meta.info)) if len(p2list) > 0: p2list = p2list[0] if isinstance(p2list,str): p2list =", "code == 1: break if code == 0: return [],[],1 result = json.loads(result)", "item in plist: p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1] + \" |", "path = '/express/query' method = 'GET' appcode = appcode querys = 'number=' +", "0.0.1 2018年3月4日 0.0.2 2018-03-07 修正了数据尚未更新的404错误,返回空列表 \"\"\" 
class ExpressChecker: \"\"\"检查快递更新状态的类 \"\"\" def __init__(self,metadata): self.metadata", "requests.get(rss) content = response.content xml = lxml.etree.XML(content) clist = xml.xpath('//channel/item/title') rlist = []", "len(p2list) > 0: p2list = p2list[0] if isinstance(p2list,str): p2list = [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历", "\"::::::\" + item[\"status\"]).strip() wlist.append(suminfo) for item in wlist: if not item in meta.data:", "[p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0 if issign == \"1\" else 1 if __name__", "p2list.append(\"[快递状态更新]\"+\" %s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1] + \" | %s:%s\"%(company,meta.info)) if len(p2list)", "if isinstance(p2list,str): p2list = [p2list] #因为快递只需要更新最近状态即可 必须返回数组,因为需要遍历 return wlist,p2list,0 if issign == \"1\"", "urllib.request.Request(url) request.add_header('Authorization', 'APPCODE ' + appcode) response = urllib.request.urlopen(request) content = response.read() if", "issign = result[\"result\"][\"issign\"] wlist = [] plist = [] p2list = [] for", "clist: if \"中英字幕\" in x.text: rlist.append(x.text) return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request", "self.metadata = metadata def getInfo(self, rss=\"\"): '''从网络API获取信息''' response = requests.get(rss) content = response.content", "import urllib.request host = 'http://jisukdcx.market.alicloudapi.com' path = '/express/query' method = 'GET' appcode =", "result[\"result\"][\"type\"] issign = result[\"result\"][\"issign\"] wlist = [] plist = [] p2list = []", "= self.checkExpress(number=str(meta.info).strip()) if code == 1: break if code == 0: return [],[],1", "item[\"status\"]).strip() wlist.append(suminfo) for item in wlist: if not item in meta.data: plist.append(item) for", "from pprint import pprint meta = { \"name\":\"孔夫子书籍\", \"id\":24322234, \"status\":1, \"rate\":30, \"type\":\"express\", \"info\":\"5083078\",", "item in wlist: if not 
item in meta.data: plist.append(item) for item in plist:", "%s:\\n\"%meta.name + item.split(\"::::::\")[0]+ \" \" +item.split(\"::::::\")[1] + \" | %s:%s\"%(company,meta.info)) if len(p2list) >", "type_ bodys = {} url = host + path + '?' + querys", "+ \" | %s:%s\"%(company,meta.info)) if len(p2list) > 0: p2list = p2list[0] if isinstance(p2list,str):", "response = requests.get(rss) content = response.content xml = lxml.etree.XML(content) clist = xml.xpath('//channel/item/title') rlist", "+ querys request = urllib.request.Request(url) request.add_header('Authorization', 'APPCODE ' + appcode) response = urllib.request.urlopen(request)", "python3 # -*- coding:utf8 -*- import requests import lxml.etree import json import pickle,traceback,shelve,time,sys", "rss=\"\"): '''从网络API获取信息''' response = requests.get(rss) content = response.content xml = lxml.etree.XML(content) clist =", "code,_,result = self.checkExpress(number=str(meta.info).strip()) if code == 1: break if code == 0: return", "'&type=' + type_ bodys = {} url = host + path + '?'", "lxml.etree.XML(content) clist = xml.xpath('//channel/item/title') rlist = [] for x in clist: if \"中英字幕\"", "in x.text: rlist.append(x.text) return rlist def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'): try: import urllib.request host = 'http://jisukdcx.market.alicloudapi.com'" ]
[ "ntrain=ntrain, nval=nval, Y_type=Y_type, # npoints=npoints, d=d, scale_X=scale_X) dimPath = 5 nPaths = 1000", "= G.Y X_train, X_test, Y_train, Y_test = \\ train_test_split(X,Y,test_size = 0.5) Xtimetrain =", "GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num = num) G.generatePath() X = G.X G.generateResponse() Y", "get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type, # npoints=npoints, d=d, scale_X=scale_X) dimPath = 5 nPaths =", "= get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type, # npoints=npoints, d=d, scale_X=scale_X) dimPath = 5 nPaths", "101 G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num = num) G.generatePath() X =", "-*- coding: utf-8 -*- \"\"\" Created on Mon Jan 17 15:42:09 2022 @author:", "X_test, Y_train, Y_test = \\ train_test_split(X,Y,test_size = 0.5) Xtimetrain = add_time(X_train) Xtimeval =", "= G.X G.generateResponse() Y = G.Y X_train, X_test, Y_train, Y_test = \\ train_test_split(X,Y,test_size", "add_time(X_train) Xtimeval = add_time(X_test) hatm = select_hatm_cv(Xtimetrain, Y_train, scaling = True) sig_reg =", "GeneratorFermanianDependentMax from sklearn.model_selection import train_test_split #Xtrain, Ytrain, Xval, Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval,", "= \\ train_test_split(X,Y,test_size = 0.5) Xtimetrain = add_time(X_train) Xtimeval = add_time(X_test) hatm =", "= True) sig_reg = SignatureRegressionNik(hatm, normalizeFeatures = True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test))", "= select_hatm_cv(Xtimetrain, Y_train, scaling = True) sig_reg = SignatureRegressionNik(hatm, normalizeFeatures = True) sig_reg.fit(Xtimetrain,", "\"\"\" from train import select_hatm_cv from tools import add_time from train import SignatureRegressionNik", "G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num = num) G.generatePath() X = G.X", "train import select_hatm_cv from tools import add_time 
from train import SignatureRegressionNik from dataGeneration", "= 101 G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num = num) G.generatePath() X", "train_test_split #Xtrain, Ytrain, Xval, Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type, # npoints=npoints, d=d,", "utf-8 -*- \"\"\" Created on Mon Jan 17 15:42:09 2022 @author: nikth \"\"\"", "Jan 17 15:42:09 2022 @author: nikth \"\"\" from train import select_hatm_cv from tools", "Mon Jan 17 15:42:09 2022 @author: nikth \"\"\" from train import select_hatm_cv from", "-*- \"\"\" Created on Mon Jan 17 15:42:09 2022 @author: nikth \"\"\" from", "add_time from train import SignatureRegressionNik from dataGeneration import GeneratorFermanianDependentMax from sklearn.model_selection import train_test_split", "coding: utf-8 -*- \"\"\" Created on Mon Jan 17 15:42:09 2022 @author: nikth", "nPaths,num = num) G.generatePath() X = G.X G.generateResponse() Y = G.Y X_train, X_test,", "SignatureRegressionNik(hatm, normalizeFeatures = True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test)) print(\"training.error\", sig_reg.get_loss(Xtimetrain, Y_train)) print(\"val.R\",", "5 nPaths = 1000 num = 101 G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths =", "nikth \"\"\" from train import select_hatm_cv from tools import add_time from train import", "<gh_stars>0 # -*- coding: utf-8 -*- \"\"\" Created on Mon Jan 17 15:42:09", "0.5) Xtimetrain = add_time(X_train) Xtimeval = add_time(X_test) hatm = select_hatm_cv(Xtimetrain, Y_train, scaling =", "= GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num = num) G.generatePath() X = G.X G.generateResponse()", "import GeneratorFermanianDependentMax from sklearn.model_selection import train_test_split #Xtrain, Ytrain, Xval, Yval = get_train_test_data(X_type, ntrain=ntrain,", "= nPaths,num = num) G.generatePath() X = G.X G.generateResponse() Y = G.Y X_train,", "= add_time(X_train) 
Xtimeval = add_time(X_test) hatm = select_hatm_cv(Xtimetrain, Y_train, scaling = True) sig_reg", "train import SignatureRegressionNik from dataGeneration import GeneratorFermanianDependentMax from sklearn.model_selection import train_test_split #Xtrain, Ytrain,", "add_time(X_test) hatm = select_hatm_cv(Xtimetrain, Y_train, scaling = True) sig_reg = SignatureRegressionNik(hatm, normalizeFeatures =", "train_test_split(X,Y,test_size = 0.5) Xtimetrain = add_time(X_train) Xtimeval = add_time(X_test) hatm = select_hatm_cv(Xtimetrain, Y_train,", "scale_X=scale_X) dimPath = 5 nPaths = 1000 num = 101 G = GeneratorFermanianDependentMax(dimPath", "Y_type=Y_type, # npoints=npoints, d=d, scale_X=scale_X) dimPath = 5 nPaths = 1000 num =", "select_hatm_cv(Xtimetrain, Y_train, scaling = True) sig_reg = SignatureRegressionNik(hatm, normalizeFeatures = True) sig_reg.fit(Xtimetrain, Y_train)", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Jan 17 15:42:09 2022", "from dataGeneration import GeneratorFermanianDependentMax from sklearn.model_selection import train_test_split #Xtrain, Ytrain, Xval, Yval =", "G.generatePath() X = G.X G.generateResponse() Y = G.Y X_train, X_test, Y_train, Y_test =", "nval=nval, Y_type=Y_type, # npoints=npoints, d=d, scale_X=scale_X) dimPath = 5 nPaths = 1000 num", "G.X G.generateResponse() Y = G.Y X_train, X_test, Y_train, Y_test = \\ train_test_split(X,Y,test_size =", "X_train, X_test, Y_train, Y_test = \\ train_test_split(X,Y,test_size = 0.5) Xtimetrain = add_time(X_train) Xtimeval", "import SignatureRegressionNik from dataGeneration import GeneratorFermanianDependentMax from sklearn.model_selection import train_test_split #Xtrain, Ytrain, Xval,", "num = 101 G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num = num) G.generatePath()", "17 15:42:09 2022 @author: nikth \"\"\" from train import select_hatm_cv from tools import", "= 0.5) Xtimetrain = add_time(X_train) Xtimeval = add_time(X_test) hatm = select_hatm_cv(Xtimetrain, Y_train, 
scaling", "@author: nikth \"\"\" from train import select_hatm_cv from tools import add_time from train", "= 1000 num = 101 G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num =", "= SignatureRegressionNik(hatm, normalizeFeatures = True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test)) print(\"training.error\", sig_reg.get_loss(Xtimetrain, Y_train))", "d=d, scale_X=scale_X) dimPath = 5 nPaths = 1000 num = 101 G =", "import select_hatm_cv from tools import add_time from train import SignatureRegressionNik from dataGeneration import", "True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test)) print(\"training.error\", sig_reg.get_loss(Xtimetrain, Y_train)) print(\"val.R\", sig_reg.score(Xtimeval, Y_test)) print(\"training.R\",", "dimPath = 5 nPaths = 1000 num = 101 G = GeneratorFermanianDependentMax(dimPath =", "\\ train_test_split(X,Y,test_size = 0.5) Xtimetrain = add_time(X_train) Xtimeval = add_time(X_test) hatm = select_hatm_cv(Xtimetrain,", "G.Y X_train, X_test, Y_train, Y_test = \\ train_test_split(X,Y,test_size = 0.5) Xtimetrain = add_time(X_train)", "2022 @author: nikth \"\"\" from train import select_hatm_cv from tools import add_time from", "scaling = True) sig_reg = SignatureRegressionNik(hatm, normalizeFeatures = True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval,", "Ytrain, Xval, Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type, # npoints=npoints, d=d, scale_X=scale_X) dimPath", "sig_reg = SignatureRegressionNik(hatm, normalizeFeatures = True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test)) print(\"training.error\", sig_reg.get_loss(Xtimetrain,", "= 5 nPaths = 1000 num = 101 G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths", "= num) G.generatePath() X = G.X G.generateResponse() Y = G.Y X_train, X_test, Y_train,", "X = G.X G.generateResponse() Y 
= G.Y X_train, X_test, Y_train, Y_test = \\", "SignatureRegressionNik from dataGeneration import GeneratorFermanianDependentMax from sklearn.model_selection import train_test_split #Xtrain, Ytrain, Xval, Yval", "= True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test)) print(\"training.error\", sig_reg.get_loss(Xtimetrain, Y_train)) print(\"val.R\", sig_reg.score(Xtimeval, Y_test))", "dataGeneration import GeneratorFermanianDependentMax from sklearn.model_selection import train_test_split #Xtrain, Ytrain, Xval, Yval = get_train_test_data(X_type,", "num) G.generatePath() X = G.X G.generateResponse() Y = G.Y X_train, X_test, Y_train, Y_test", "dimPath,nPaths = nPaths,num = num) G.generatePath() X = G.X G.generateResponse() Y = G.Y", "G.generateResponse() Y = G.Y X_train, X_test, Y_train, Y_test = \\ train_test_split(X,Y,test_size = 0.5)", "Created on Mon Jan 17 15:42:09 2022 @author: nikth \"\"\" from train import", "True) sig_reg = SignatureRegressionNik(hatm, normalizeFeatures = True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test)) print(\"training.error\",", "Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test)) print(\"training.error\", sig_reg.get_loss(Xtimetrain, Y_train)) print(\"val.R\", sig_reg.score(Xtimeval, Y_test)) print(\"training.R\", sig_reg.score(Xtimetrain, Y_train))", "tools import add_time from train import SignatureRegressionNik from dataGeneration import GeneratorFermanianDependentMax from sklearn.model_selection", "sklearn.model_selection import train_test_split #Xtrain, Ytrain, Xval, Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type, #", "Y_train, scaling = True) sig_reg = SignatureRegressionNik(hatm, normalizeFeatures = True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\",", "import add_time from train import SignatureRegressionNik from dataGeneration import GeneratorFermanianDependentMax from sklearn.model_selection 
import", "from tools import add_time from train import SignatureRegressionNik from dataGeneration import GeneratorFermanianDependentMax from", "from train import select_hatm_cv from tools import add_time from train import SignatureRegressionNik from", "import train_test_split #Xtrain, Ytrain, Xval, Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type, # npoints=npoints,", "# npoints=npoints, d=d, scale_X=scale_X) dimPath = 5 nPaths = 1000 num = 101", "from train import SignatureRegressionNik from dataGeneration import GeneratorFermanianDependentMax from sklearn.model_selection import train_test_split #Xtrain,", "= add_time(X_test) hatm = select_hatm_cv(Xtimetrain, Y_train, scaling = True) sig_reg = SignatureRegressionNik(hatm, normalizeFeatures", "Y_train, Y_test = \\ train_test_split(X,Y,test_size = 0.5) Xtimetrain = add_time(X_train) Xtimeval = add_time(X_test)", "nPaths = 1000 num = 101 G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num", "from sklearn.model_selection import train_test_split #Xtrain, Ytrain, Xval, Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type,", "select_hatm_cv from tools import add_time from train import SignatureRegressionNik from dataGeneration import GeneratorFermanianDependentMax", "Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type, # npoints=npoints, d=d, scale_X=scale_X) dimPath = 5", "normalizeFeatures = True) sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test)) print(\"training.error\", sig_reg.get_loss(Xtimetrain, Y_train)) print(\"val.R\", sig_reg.score(Xtimeval,", "\"\"\" Created on Mon Jan 17 15:42:09 2022 @author: nikth \"\"\" from train", "Y_test = \\ train_test_split(X,Y,test_size = 0.5) Xtimetrain = add_time(X_train) Xtimeval = add_time(X_test) hatm", "Xval, Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type, # npoints=npoints, d=d, scale_X=scale_X) dimPath =", 
"npoints=npoints, d=d, scale_X=scale_X) dimPath = 5 nPaths = 1000 num = 101 G", "1000 num = 101 G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num = num)", "Xtimetrain = add_time(X_train) Xtimeval = add_time(X_test) hatm = select_hatm_cv(Xtimetrain, Y_train, scaling = True)", "15:42:09 2022 @author: nikth \"\"\" from train import select_hatm_cv from tools import add_time", "#Xtrain, Ytrain, Xval, Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type, # npoints=npoints, d=d, scale_X=scale_X)", "Y = G.Y X_train, X_test, Y_train, Y_test = \\ train_test_split(X,Y,test_size = 0.5) Xtimetrain", "on Mon Jan 17 15:42:09 2022 @author: nikth \"\"\" from train import select_hatm_cv", "= dimPath,nPaths = nPaths,num = num) G.generatePath() X = G.X G.generateResponse() Y =", "Xtimeval = add_time(X_test) hatm = select_hatm_cv(Xtimetrain, Y_train, scaling = True) sig_reg = SignatureRegressionNik(hatm,", "hatm = select_hatm_cv(Xtimetrain, Y_train, scaling = True) sig_reg = SignatureRegressionNik(hatm, normalizeFeatures = True)", "sig_reg.fit(Xtimetrain, Y_train) print(\"val.error\", sig_reg.get_loss(Xtimeval, Y_test)) print(\"training.error\", sig_reg.get_loss(Xtimetrain, Y_train)) print(\"val.R\", sig_reg.score(Xtimeval, Y_test)) print(\"training.R\", sig_reg.score(Xtimetrain," ]
[ "X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data)", "npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data) assert input_dim ==", "npt import data_process as dp def test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def", "of input layer is not correct\" assert output_dim == 1 , \"Dimension of", "= dp.data_info(data) assert input_dim == 2,\"Dimension of input layer is not correct\" assert", "X,Y,input_dim,output_dim = dp.data_info(data) assert input_dim == 2,\"Dimension of input layer is not correct\"", "pandas as pd import numpy as np import numpy.testing as npt import data_process", "data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data) assert input_dim == 2,\"Dimension of input layer", "numpy as np import numpy.testing as npt import data_process as dp def test_read_file():", "test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim =", "== 2,\"Dimension of input layer is not correct\" assert output_dim == 1 ,", "os import pandas as pd import numpy as np import numpy.testing as npt", "<filename>neuralizer/tests/test_data_process.py from __future__ import absolute_import,division,print_function import os import pandas as pd import numpy", "as npt import data_process as dp def 
test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5])", "is not correct\" assert output_dim == 1 , \"Dimension of output layer is", "{\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data) assert input_dim == 2,\"Dimension of input layer is not", "import absolute_import,division,print_function import os import pandas as pd import numpy as np import", "= {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data) assert input_dim == 2,\"Dimension of input layer is", "correct\" assert output_dim == 1 , \"Dimension of output layer is not correct\"", "not correct\" assert output_dim == 1 , \"Dimension of output layer is not", "test_data_info(): data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data) assert input_dim == 2,\"Dimension of input", "import numpy.testing as npt import data_process as dp def test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\")", "def test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim", "import numpy as np import numpy.testing as npt import data_process as dp def", "absolute_import,division,print_function import os import pandas as pd import numpy as np import numpy.testing", "as dp def test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data =", "= dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data = 
{\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data) assert", "pd import numpy as np import numpy.testing as npt import data_process as dp", "import pandas as pd import numpy as np import numpy.testing as npt import", "input_dim == 2,\"Dimension of input layer is not correct\" assert output_dim == 1", "import data_process as dp def test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info():", "input layer is not correct\" assert output_dim == 1 , \"Dimension of output", "as pd import numpy as np import numpy.testing as npt import data_process as", "npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data) assert input_dim == 2,\"Dimension", "data_process as dp def test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data", "2,\"Dimension of input layer is not correct\" assert output_dim == 1 , \"Dimension", "from __future__ import absolute_import,division,print_function import os import pandas as pd import numpy as", "np import numpy.testing as npt import data_process as dp def test_read_file(): X,Y =", "layer is not correct\" assert output_dim == 1 , \"Dimension of output layer", "numpy.testing as npt import data_process as dp def test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]])", "def test_data_info(): data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data) assert input_dim == 2,\"Dimension of", "dp.data_info(data) assert input_dim == 2,\"Dimension of input layer is not correct\" assert output_dim", "import os import 
pandas as pd import numpy as np import numpy.testing as", "dp def test_read_file(): X,Y = dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"}", "assert input_dim == 2,\"Dimension of input layer is not correct\" assert output_dim ==", "__future__ import absolute_import,division,print_function import os import pandas as pd import numpy as np", "dp.read_file('test.tsv',[\"A\",\"B\"],\"y\") npt.assert_equal(X,np.array[[0,1],[3,2],[4,3]]) npt.assert_equal(Y,np.array[0,1,5]) def test_data_info(): data = {\"filename\":\"test.tsv\",\"X_var\":[\"A\",\"B\"],\"Y_var\":\"y\"} X,Y,input_dim,output_dim = dp.data_info(data) assert input_dim", "as np import numpy.testing as npt import data_process as dp def test_read_file(): X,Y" ]
[ "stack[-1] if node.left and node.left not in visited: stack.append(node.left) continue if node.right and", "and self.right==None def preorder(root): ''' return [node.v] ''' stack = [root] visited =", "visited = set() res = [] while stack: node = stack[-1] if node", "node = stack[-1] if node not in visited: visited.add(node) res.append(node.v) if node.left and", "visited: stack.append(node.left) continue if node.right and node.right not in visited: stack.append(node.right) continue stack.pop(-1)", "Tree(2) root.left, root.right = left, right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def", "[node.v] ''' stack = [root] visited = set() res = [] while stack:", "not in visited: stack.append(node.right) continue if node not in visited: visited.add(node) res.append(node.v) stack.pop(-1)", "res.append(node.v) stack.pop(-1) return res class XTest(unittest.TestCase): def test_sample1(self): root, left, right = Tree(0),", "node.left not in visited: stack.append(node.left) continue if node.right and node.right not in visited:", "stack.append(node.left) continue if node.right and node.right not in visited: stack.append(node.right) continue stack.pop(-1) return", "left, right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self): n1, n2, n3,", "def __init__(self, v): self.v = v self.left = None self.right = None def", "self.left = None self.right = None def is_terminal(self): return self.left==None and self.right==None def", "if node.left and node.left not in visited: stack.append(node.left) continue if node.right and node.right", "and node.right not in visited: stack.append(node.right) continue stack.pop(-1) return res def postorder(root): '''", "continue if node.right and node.right not in visited: stack.append(node.right) continue if node not", "and node.left not in visited: 
stack.append(node.left) continue if node.right and node.right not in", "not in visited: stack.append(node.right) continue stack.pop(-1) return res def inorder(root): ''' return [node.v]", "not in visited: stack.append(node.left) continue if node not in visited: visited.add(node) res.append(node.v) if", "stack: node = stack[-1] if node not in visited: visited.add(node) res.append(node.v) if node.left", "not in visited: visited.add(node) res.append(node.v) if node.right and node.right not in visited: stack.append(node.right)", "in visited: stack.append(node.right) continue stack.pop(-1) return res def inorder(root): ''' return [node.v] '''", "in visited: stack.append(node.left) continue if node not in visited: visited.add(node) res.append(node.v) if node.right", "Tree(3), Tree(4), Tree(5) n1.left = n2 n2.left = n3 n2.right = n4 n4.right", "return res def inorder(root): ''' return [node.v] ''' stack = [root] visited =", "self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self): n1, n2, n3, n4, n5 = Tree(1),", "test_sample1(self): root, left, right = Tree(0), Tree(1), Tree(2) root.left, root.right = left, right", "in visited: stack.append(node.right) continue stack.pop(-1) return res def postorder(root): ''' return [node.v] '''", "return res class XTest(unittest.TestCase): def test_sample1(self): root, left, right = Tree(0), Tree(1), Tree(2)", "n1.left = n2 n2.left = n3 n2.right = n4 n4.right = n5 self.assertEqual(preorder(n1),", "inorder(root): ''' return [node.v] ''' stack = [root] visited = set() res =", "left, right = Tree(0), Tree(1), Tree(2) root.left, root.right = left, right self.assertEqual(preorder(root), [0,1,2])", "stack[-1] if node not in visited: visited.add(node) res.append(node.v) if node.left and node.left not", "class Tree: def __init__(self, v): self.v = v self.left = None self.right =", "postorder(root): ''' return [node.v] ''' stack = [root] visited = set() res =", "v self.left = None 
self.right = None def is_terminal(self): return self.left==None and self.right==None", "stack.append(node.right) continue stack.pop(-1) return res def postorder(root): ''' return [node.v] ''' stack =", "= stack[-1] if node.left and node.left not in visited: stack.append(node.left) continue if node.right", "stack.append(node.right) continue stack.pop(-1) return res def inorder(root): ''' return [node.v] ''' stack =", "continue if node not in visited: visited.add(node) res.append(node.v) if node.right and node.right not", "return [node.v] ''' stack = [root] visited = set() res = [] while", "Tree(4), Tree(5) n1.left = n2 n2.left = n3 n2.right = n4 n4.right =", "n4 n4.right = n5 self.assertEqual(preorder(n1), [1,2,3,4,5]) self.assertEqual(inorder(n1), [3,2,4,5,1]) self.assertEqual(postorder(n1), [3,5,4,2,1]) if __name__==\"__main__\": unittest.main()", "node = stack[-1] if node.left and node.left not in visited: stack.append(node.left) continue if", "''' stack = [root] visited = set() res = [] while stack: node", "the Coding Interview/tree_search_by_stack.py<gh_stars>1-10 #!/usr/bin/env python3 import unittest class Tree: def __init__(self, v): self.v", "def postorder(root): ''' return [node.v] ''' stack = [root] visited = set() res", "[root] visited = set() res = [] while stack: node = stack[-1] if", "res = [] while stack: node = stack[-1] if node.left and node.left not", "if node.left and node.left not in visited: stack.append(node.left) continue if node not in", "node.right not in visited: stack.append(node.right) continue stack.pop(-1) return res def postorder(root): ''' return", "if node not in visited: visited.add(node) res.append(node.v) if node.right and node.right not in", "visited: stack.append(node.right) continue stack.pop(-1) return res def inorder(root): ''' return [node.v] ''' stack", "continue stack.pop(-1) return res def postorder(root): ''' return [node.v] ''' stack = [root]", "in visited: visited.add(node) res.append(node.v) if node.left and 
node.left not in visited: stack.append(node.left) continue", "res.append(node.v) if node.left and node.left not in visited: stack.append(node.left) continue if node.right and", "in visited: stack.append(node.right) continue if node not in visited: visited.add(node) res.append(node.v) stack.pop(-1) return", "visited: stack.append(node.left) continue if node.right and node.right not in visited: stack.append(node.right) continue if", "= [root] visited = set() res = [] while stack: node = stack[-1]", "and node.right not in visited: stack.append(node.right) continue stack.pop(-1) return res def inorder(root): '''", "continue if node not in visited: visited.add(node) res.append(node.v) stack.pop(-1) return res class XTest(unittest.TestCase):", "continue stack.pop(-1) return res def inorder(root): ''' return [node.v] ''' stack = [root]", "class XTest(unittest.TestCase): def test_sample1(self): root, left, right = Tree(0), Tree(1), Tree(2) root.left, root.right", "n2.right = n4 n4.right = n5 self.assertEqual(preorder(n1), [1,2,3,4,5]) self.assertEqual(inorder(n1), [3,2,4,5,1]) self.assertEqual(postorder(n1), [3,5,4,2,1]) if", "Tree(1), Tree(2) root.left, root.right = left, right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root), [1,2,0])", "= [] while stack: node = stack[-1] if node.left and node.left not in", "set() res = [] while stack: node = stack[-1] if node.left and node.left", "= stack[-1] if node not in visited: visited.add(node) res.append(node.v) if node.left and node.left", "n3, n4, n5 = Tree(1), Tree(2), Tree(3), Tree(4), Tree(5) n1.left = n2 n2.left", "root.right = left, right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self): n1,", "self.right==None def preorder(root): ''' return [node.v] ''' stack = [root] visited = set()", "def test_sample1(self): root, left, right = Tree(0), Tree(1), 
Tree(2) root.left, root.right = left,", "res def postorder(root): ''' return [node.v] ''' stack = [root] visited = set()", "visited: visited.add(node) res.append(node.v) if node.right and node.right not in visited: stack.append(node.right) continue stack.pop(-1)", "= set() res = [] while stack: node = stack[-1] if node not", "Coding Interview/tree_search_by_stack.py<gh_stars>1-10 #!/usr/bin/env python3 import unittest class Tree: def __init__(self, v): self.v =", "Tree(0), Tree(1), Tree(2) root.left, root.right = left, right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root),", "visited: stack.append(node.right) continue stack.pop(-1) return res def postorder(root): ''' return [node.v] ''' stack", "None self.right = None def is_terminal(self): return self.left==None and self.right==None def preorder(root): '''", "return res def postorder(root): ''' return [node.v] ''' stack = [root] visited =", "stack.pop(-1) return res class XTest(unittest.TestCase): def test_sample1(self): root, left, right = Tree(0), Tree(1),", "= n3 n2.right = n4 n4.right = n5 self.assertEqual(preorder(n1), [1,2,3,4,5]) self.assertEqual(inorder(n1), [3,2,4,5,1]) self.assertEqual(postorder(n1),", "self.v = v self.left = None self.right = None def is_terminal(self): return self.left==None", "root.left, root.right = left, right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self):", "[1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self): n1, n2, n3, n4, n5 = Tree(1), Tree(2),", "Interview/tree_search_by_stack.py<gh_stars>1-10 #!/usr/bin/env python3 import unittest class Tree: def __init__(self, v): self.v = v", "n5 = Tree(1), Tree(2), Tree(3), Tree(4), Tree(5) n1.left = n2 n2.left = n3", "not in visited: visited.add(node) res.append(node.v) stack.pop(-1) return res class XTest(unittest.TestCase): def 
test_sample1(self): root,", "n1, n2, n3, n4, n5 = Tree(1), Tree(2), Tree(3), Tree(4), Tree(5) n1.left =", "def test_dfs_and_bfs2(self): n1, n2, n3, n4, n5 = Tree(1), Tree(2), Tree(3), Tree(4), Tree(5)", "node.left not in visited: stack.append(node.left) continue if node not in visited: visited.add(node) res.append(node.v)", "right = Tree(0), Tree(1), Tree(2) root.left, root.right = left, right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root),", "stack.append(node.left) continue if node not in visited: visited.add(node) res.append(node.v) if node.right and node.right", "visited.add(node) res.append(node.v) stack.pop(-1) return res class XTest(unittest.TestCase): def test_sample1(self): root, left, right =", "visited: stack.append(node.left) continue if node not in visited: visited.add(node) res.append(node.v) if node.right and", "= n2 n2.left = n3 n2.right = n4 n4.right = n5 self.assertEqual(preorder(n1), [1,2,3,4,5])", "def inorder(root): ''' return [node.v] ''' stack = [root] visited = set() res", "node not in visited: visited.add(node) res.append(node.v) stack.pop(-1) return res class XTest(unittest.TestCase): def test_sample1(self):", "in visited: stack.append(node.left) continue if node.right and node.right not in visited: stack.append(node.right) continue", "self.right = None def is_terminal(self): return self.left==None and self.right==None def preorder(root): ''' return", "XTest(unittest.TestCase): def test_sample1(self): root, left, right = Tree(0), Tree(1), Tree(2) root.left, root.right =", "visited.add(node) res.append(node.v) if node.left and node.left not in visited: stack.append(node.left) continue if node.right", "n2, n3, n4, n5 = Tree(1), Tree(2), Tree(3), Tree(4), Tree(5) n1.left = n2", "n4, n5 = Tree(1), Tree(2), Tree(3), Tree(4), Tree(5) n1.left = n2 n2.left =", "stack.append(node.left) continue if node.right and node.right not in visited: stack.append(node.right) continue if node", "stack.pop(-1) return res def 
inorder(root): ''' return [node.v] ''' stack = [root] visited", "visited: stack.append(node.right) continue if node not in visited: visited.add(node) res.append(node.v) stack.pop(-1) return res", "= [] while stack: node = stack[-1] if node not in visited: visited.add(node)", "node not in visited: visited.add(node) res.append(node.v) if node.left and node.left not in visited:", "None def is_terminal(self): return self.left==None and self.right==None def preorder(root): ''' return [node.v] '''", "#!/usr/bin/env python3 import unittest class Tree: def __init__(self, v): self.v = v self.left", "return self.left==None and self.right==None def preorder(root): ''' return [node.v] ''' stack = [root]", "is_terminal(self): return self.left==None and self.right==None def preorder(root): ''' return [node.v] ''' stack =", "stack.pop(-1) return res def postorder(root): ''' return [node.v] ''' stack = [root] visited", "__init__(self, v): self.v = v self.left = None self.right = None def is_terminal(self):", "stack = [root] visited = set() res = [] while stack: node =", "visited = set() res = [] while stack: node = stack[-1] if node.left", "= stack[-1] if node.left and node.left not in visited: stack.append(node.left) continue if node", "n2.left = n3 n2.right = n4 n4.right = n5 self.assertEqual(preorder(n1), [1,2,3,4,5]) self.assertEqual(inorder(n1), [3,2,4,5,1])", "in visited: visited.add(node) res.append(node.v) stack.pop(-1) return res class XTest(unittest.TestCase): def test_sample1(self): root, left,", "<filename>quizzes/00.organize.me/Cracking the Coding Interview/tree_search_by_stack.py<gh_stars>1-10 #!/usr/bin/env python3 import unittest class Tree: def __init__(self, v):", "= v self.left = None self.right = None def is_terminal(self): return self.left==None and", "node.right not in visited: stack.append(node.right) continue stack.pop(-1) return res def inorder(root): ''' return", "= left, right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), 
[1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self): n1, n2,", "python3 import unittest class Tree: def __init__(self, v): self.v = v self.left =", "= None self.right = None def is_terminal(self): return self.left==None and self.right==None def preorder(root):", "not in visited: visited.add(node) res.append(node.v) if node.left and node.left not in visited: stack.append(node.left)", "visited: visited.add(node) res.append(node.v) if node.left and node.left not in visited: stack.append(node.left) continue if", "and node.right not in visited: stack.append(node.right) continue if node not in visited: visited.add(node)", "if node not in visited: visited.add(node) res.append(node.v) stack.pop(-1) return res class XTest(unittest.TestCase): def", "= Tree(0), Tree(1), Tree(2) root.left, root.right = left, right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), [1,0,2])", "right self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self): n1, n2, n3, n4,", "unittest class Tree: def __init__(self, v): self.v = v self.left = None self.right", "= Tree(1), Tree(2), Tree(3), Tree(4), Tree(5) n1.left = n2 n2.left = n3 n2.right", "= n4 n4.right = n5 self.assertEqual(preorder(n1), [1,2,3,4,5]) self.assertEqual(inorder(n1), [3,2,4,5,1]) self.assertEqual(postorder(n1), [3,5,4,2,1]) if __name__==\"__main__\":", "node.right and node.right not in visited: stack.append(node.right) continue if node not in visited:", "[0,1,2]) self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self): n1, n2, n3, n4, n5 =", "root, left, right = Tree(0), Tree(1), Tree(2) root.left, root.right = left, right self.assertEqual(preorder(root),", "if node.right and node.right not in visited: stack.append(node.right) continue if node not in", "Tree(1), Tree(2), Tree(3), Tree(4), Tree(5) n1.left = n2 n2.left = n3 
n2.right =", "def is_terminal(self): return self.left==None and self.right==None def preorder(root): ''' return [node.v] ''' stack", "stack: node = stack[-1] if node.left and node.left not in visited: stack.append(node.left) continue", "node.right and node.right not in visited: stack.append(node.right) continue stack.pop(-1) return res def inorder(root):", "= set() res = [] while stack: node = stack[-1] if node.left and", "visited.add(node) res.append(node.v) if node.right and node.right not in visited: stack.append(node.right) continue stack.pop(-1) return", "n3 n2.right = n4 n4.right = n5 self.assertEqual(preorder(n1), [1,2,3,4,5]) self.assertEqual(inorder(n1), [3,2,4,5,1]) self.assertEqual(postorder(n1), [3,5,4,2,1])", "self.assertEqual(preorder(root), [0,1,2]) self.assertEqual(inorder(root), [1,0,2]) self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self): n1, n2, n3, n4, n5", "stack[-1] if node.left and node.left not in visited: stack.append(node.left) continue if node not", "in visited: visited.add(node) res.append(node.v) if node.right and node.right not in visited: stack.append(node.right) continue", "and node.left not in visited: stack.append(node.left) continue if node not in visited: visited.add(node)", "node.left and node.left not in visited: stack.append(node.left) continue if node not in visited:", "[] while stack: node = stack[-1] if node.left and node.left not in visited:", "res = [] while stack: node = stack[-1] if node not in visited:", "set() res = [] while stack: node = stack[-1] if node not in", "def preorder(root): ''' return [node.v] ''' stack = [root] visited = set() res", "not in visited: stack.append(node.right) continue stack.pop(-1) return res def postorder(root): ''' return [node.v]", "preorder(root): ''' return [node.v] ''' stack = [root] visited = set() res =", "''' return [node.v] ''' stack = [root] visited = set() res = []", "Tree(5) n1.left = n2 n2.left = n3 n2.right = n4 n4.right = n5", "node.left and node.left not 
in visited: stack.append(node.left) continue if node.right and node.right not", "self.assertEqual(postorder(root), [1,2,0]) def test_dfs_and_bfs2(self): n1, n2, n3, n4, n5 = Tree(1), Tree(2), Tree(3),", "not in visited: stack.append(node.left) continue if node.right and node.right not in visited: stack.append(node.right)", "= None def is_terminal(self): return self.left==None and self.right==None def preorder(root): ''' return [node.v]", "while stack: node = stack[-1] if node.left and node.left not in visited: stack.append(node.left)", "node.right not in visited: stack.append(node.right) continue if node not in visited: visited.add(node) res.append(node.v)", "test_dfs_and_bfs2(self): n1, n2, n3, n4, n5 = Tree(1), Tree(2), Tree(3), Tree(4), Tree(5) n1.left", "n2 n2.left = n3 n2.right = n4 n4.right = n5 self.assertEqual(preorder(n1), [1,2,3,4,5]) self.assertEqual(inorder(n1),", "node not in visited: visited.add(node) res.append(node.v) if node.right and node.right not in visited:", "[1,2,0]) def test_dfs_and_bfs2(self): n1, n2, n3, n4, n5 = Tree(1), Tree(2), Tree(3), Tree(4),", "Tree: def __init__(self, v): self.v = v self.left = None self.right = None", "Tree(2), Tree(3), Tree(4), Tree(5) n1.left = n2 n2.left = n3 n2.right = n4", "v): self.v = v self.left = None self.right = None def is_terminal(self): return", "visited: visited.add(node) res.append(node.v) stack.pop(-1) return res class XTest(unittest.TestCase): def test_sample1(self): root, left, right", "res class XTest(unittest.TestCase): def test_sample1(self): root, left, right = Tree(0), Tree(1), Tree(2) root.left,", "while stack: node = stack[-1] if node not in visited: visited.add(node) res.append(node.v) if", "import unittest class Tree: def __init__(self, v): self.v = v self.left = None", "res.append(node.v) if node.right and node.right not in visited: stack.append(node.right) continue stack.pop(-1) return res", "res def inorder(root): ''' return [node.v] ''' stack = [root] visited = set()", "node.right 
and node.right not in visited: stack.append(node.right) continue stack.pop(-1) return res def postorder(root):", "stack.append(node.right) continue if node not in visited: visited.add(node) res.append(node.v) stack.pop(-1) return res class", "if node.right and node.right not in visited: stack.append(node.right) continue stack.pop(-1) return res def", "[] while stack: node = stack[-1] if node not in visited: visited.add(node) res.append(node.v)", "continue if node.right and node.right not in visited: stack.append(node.right) continue stack.pop(-1) return res", "self.left==None and self.right==None def preorder(root): ''' return [node.v] ''' stack = [root] visited", "if node not in visited: visited.add(node) res.append(node.v) if node.left and node.left not in" ]
[ "get_all_subjects(): return ['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC', 'Phys', 'Chem', 'SciE',", "'Alg2', 'Geo', 'PreC', 'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4'] def __init__(self,", "1) @staticmethod def get_all_subjects(): return ['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC',", "'Civ2', 'Civ3', 'Civ4'] def __init__(self, name: str): self.name = name self.id = next(self.__id_generator)", "Subject: __id_generator = itertools.count(0, 1) @staticmethod def get_all_subjects(): return ['Eng1', 'Eng2', 'Eng3', 'Eng4',", "'Alg1', 'Alg2', 'Geo', 'PreC', 'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4'] def", "'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4'] def __init__(self, name: str): self.name", "class Subject: __id_generator = itertools.count(0, 1) @staticmethod def get_all_subjects(): return ['Eng1', 'Eng2', 'Eng3',", "'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4'] def __init__(self, name: str): self.name =", "def get_all_subjects(): return ['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC', 'Phys', 'Chem',", "'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC', 'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3',", "return ['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC', 'Phys', 'Chem', 'SciE', 'Bio',", "'Geo', 'PreC', 'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4'] def __init__(self, name:", "itertools.count(0, 1) @staticmethod def get_all_subjects(): return ['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo',", "import itertools class Subject: __id_generator = itertools.count(0, 1) @staticmethod def get_all_subjects(): return ['Eng1',", "'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC', 'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4']", "@staticmethod def get_all_subjects(): return ['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC', 'Phys',", "= itertools.count(0, 1) @staticmethod def get_all_subjects(): return 
['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2',", "['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC', 'Phys', 'Chem', 'SciE', 'Bio', 'Civ1',", "'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4'] def __init__(self, name: str): self.name = name", "'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC', 'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2',", "itertools class Subject: __id_generator = itertools.count(0, 1) @staticmethod def get_all_subjects(): return ['Eng1', 'Eng2',", "'Civ1', 'Civ2', 'Civ3', 'Civ4'] def __init__(self, name: str): self.name = name self.id =", "__id_generator = itertools.count(0, 1) @staticmethod def get_all_subjects(): return ['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1',", "'PreC', 'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4'] def __init__(self, name: str):", "'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4'] def __init__(self, name: str): self.name = name self.id" ]
[ "no duplicate keys in: \"+file) return data def composeCsv(data, target): lines = []", "\"216\", \"217\", \"218\", # Components - Class D \"219\", \"2001\", \"2002\", \"2003\", \"2004\",", "getIdentifierIndex(header, identifier) if identifierIndex == -1: print(\"🛑 couldn't locate '\"+identifier+\"' in '\"+source+\"'\") quit()", "can be found at: https://github.com/Katorone/Astrox-Imperium # This script exports 2 files to a", "= cleanLine(getHeader(lines), '\\t ', ';') identifierIndex = getIdentifierIndex(header, identifier) if identifierIndex == -1:", "-1: print(\"🛑 couldn't locate '\"+docsHeader+\"' in findMissing(), unable to continue sanity check.\") return", "= {} data[delimiter+'header'+delimiter] = header # store the header for future use doubles", "# Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile = 'items_database.txt'", "doesn't exists.\") if not haserror: print(\"✔️ All files in column '\"+header+\"' exist.\") if", "unique IDs (the exported csv will only contain the first match) # -", "item ID(s) do not have a crafting document: \"+', '.join(missingDocs)) print(\" Items that", "which title it needs to look for when examining data. 
header = {}", "\"2008\", \"2009\", # Components - Class E \"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\",", "couldn't locate '\"+identifier+\"' in '\"+source+\"'\") quit() # Parse the items, stored as item[id]", "not haserror: print(\"✔️ All files in column '\"+header+\"' exist.\") if __name__ == \"__main__\":", "in checkFileLinks(), unable to continue sanity check.\") return haserror = False for i", "\"2009\", # Components - Class E \"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\",", "\"206\", \"207\", \"208\", # Components - Class C \"209\", \"210\", \"211\", \"212\", \"213\",", "\"660\", \"661\", \"662\", \"663\", \"664\", \"665\", \"666\", \"667\", \"668\", \"669\", # Life Support", "Trade Goods \"333\", \"341\", \"342\", \"340\", \"343\", \"303\", \"304\", \"305\", \"322\", \"324\", #", "have a crafting document: \"+', '.join(missingDocs)) print(\" Items that are uncraftable by design", "cleanLine(line, strip, delim): line = line.strip() if line == \"\": return line if", "itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️ All items have a crafting document attached (with \"+str(len(ignoreUncraftable))+\"", "set(docIDs) ignoreSet = set(ignoreUncraftable) missingDocs = [x for x in itemIDs if x", "\"622\", \"623\", \"624\", \"625\", \"626\", \"627\", \"628\", \"629\", # Life Support - Water", "\"672\", \"673\", \"692\", \"674\", \"675\", \"693\", # Consumables \"676\", \"677\", \"700\", \"678\", \"679\",", "a crafting document, but the item does not exist: \"+', '.join(missingItems)) print(\"------------------------------\") else:", "if docsHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+docsHeader+\"' in findMissing(), unable to continue", "doc image' # The item ID that a doc would craft: header['docItemId'] =", "writeFile(path, dataList): fh = open(path, 'w', encoding='utf8', newline='') for line in dataList: fh.write(line+'\\r\\n')", "\"2057\", \"2058\", \"2059\", # Components - Class J \"2080\", 
\"2081\", \"2082\", \"400\", \"401\",", "of data to path def writeFile(path, dataList): fh = open(path, 'w', encoding='utf8', newline='')", "Materials \"121\", \"117\", \"116\", \"124\", \"119\", \"123\", \"120\", \"122\", # Materials \"150\", \"151\",", "lines) # Check itemData and docData for duplicate IDs def findDuplicateEntries(fn1, data1, fn2,", "'\"+docsHeader+\"' in findMissing(), unable to continue sanity check.\") return itemIDs = [] for", "set(ignoreUncraftable) missingDocs = [x for x in itemIDs if x not in docSet", "\"119\", \"123\", \"120\", \"122\", # Materials \"150\", \"151\", \"152\", \"153\", \"164\", \"155\", \"156\",", "- Waste \"690\", \"670\", \"671\", \"691\", \"672\", \"673\", \"692\", \"674\", \"675\", \"693\", #", "itemfile = 'items_database.txt' docfile = 'specs_database.txt' # Delimiter to use in the exported", "duplicates: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(duplicates[id])+\" times in \"+fn1+\" and \"+fn2+\".\")", "It will also do some sanity checking, which should be useful for mod", "- Class H \"2040\", \"2041\", \"2042\", \"2043\", \"2044\", \"2045\", \"2046\", \"2047\", \"2048\", \"2049\",", "0: for id in duplicates: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(duplicates[id])+\" times", "= [] for i in docs: if i == delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) #", "- Check if all items have a document # - Check if all", "Components - Class C \"209\", \"210\", \"211\", \"212\", \"213\", \"214\", \"215\", \"216\", \"217\",", "file = data[i][headerIdentifier] if not os.path.isfile(os.path.join(source, file)): haserror = True print(\"❌ Item id", "# data is a dictionary-type, which is guarantueed to be ordered by insertion.", "len(duplicates) > 0: print(\"❌ The following item ID(s) have more than one crafting", "dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished writing: \"+path) # Takes a string and returns", "\"352\", \"330\", \"332\", \"331\", # 
Trade Goods \"333\", \"341\", \"342\", \"340\", \"343\", \"303\",", "\"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"2018\", \"2019\", # Components - Class F \"2020\",", "Check if all documents point to an existing item # - Check if", "we find docIDs that are missing in itemIDs itemSet = set(itemIDs) missingItems =", "contain the first match) # - Every ID between items and documents needs", "The unique identifier '\"+id+\"' matched \"+str(duplicates[id])+\" times in \"+fn1+\" and \"+fn2+\".\") print(\"❌ Duplicate", "\"8\", \"9\", \"10\", # Resources - Raw \"11\", \"20\", \"21\", \"22\", \"23\", \"24\",", "\"162\", \"163\", \"154\", \"159\", \"165\", \"166\", \"167\", \"169\", # Components - Class B", "\"303\", \"304\", \"305\", \"322\", \"324\", # Trade Goods \"320\", \"321\", \"323\", \"325\", \"311\",", "Water \"640\", \"641\", \"642\", \"643\", \"644\", \"645\", \"646\", \"647\", \"648\", \"649\", # Life", "', ';') identifierIndex = getIdentifierIndex(header, identifier) if identifierIndex == -1: print(\"🛑 couldn't locate", "a unique item.\") # We have 2 lists of IDs, find the IDs", "all documents point to an existing item # - Check if all documents", "line.strip() if line == \"\": return line if line[-1] == delim: line =", "def findDuplicateEntries(fn1, data1, fn2, data2): duplicates = {} for id in data1.keys() &", "information, as well as the (non)licence can be found at: https://github.com/Katorone/Astrox-Imperium # This", "header for future use doubles = {} # stores the ID that are", "';') if line == \"\": continue # Ignore empty lines id = line[identifierIndex]", "probably won't need to change this, unless Momo changes this in an update.", "of the identifier from the header[list] def getIdentifierIndex(header, identifier): if identifier not in", "= line.strip() if line == \"\": return line if line[-1] == delim: line", "Components - Class E \"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"2018\",", "identifier not in 
header: return -1 return header.index(identifier) def parseFile(file, identifier, ): lines", "the first match per duplicate.\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys", "+ 1 if len(duplicates) > 0: for id in duplicates: print(\"❌ The unique", "These settings tell the script which title it needs to look for when", "\"712\", \"702\", \"703\", \"735\", \"736\", \"737\", \"738\", # Consumables ] ## These settings", "the column header[itemId] has en entry in the column header[docItemId] def sanityCheck(items, itemHeader,", "for id in duplicates: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(duplicates[id])+\" times in", "Waste \"690\", \"670\", \"671\", \"691\", \"672\", \"673\", \"692\", \"674\", \"675\", \"693\", # Consumables", "= '9 CRAFTS ID' ### End of configuration ### ### Code starts here", "Finished writing: \"+path) # Takes a string and returns a list def cleanLine(line,", "encoding='utf8', newline='\\n') data = fh.readlines() fh.close() return data # Writes a list of", "docIDs if x not in itemSet] if len(missingItems) > 0: print(\"❌ The following", "duplicate, add the line. 
data[id] = line if len(doubles) > 0: for id", "os.path.isfile(os.path.join(source, file)): haserror = True print(\"❌ Item id '\"+i+\"' links to '\"+file+\"', which", "have a doc for crafting # - Check if the .png for an", "\"2056\", \"2057\", \"2058\", \"2059\", # Components - Class J \"2080\", \"2081\", \"2082\", \"400\",", "settings tell the script which title it needs to look for when examining", "findDuplicateEntries(fn1, data1, fn2, data2): duplicates = {} for id in data1.keys() & data2.keys():", "header['docItemId'] = '9 CRAFTS ID' ### End of configuration ### ### Code starts", "- Class E \"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"2018\", \"2019\",", "of documents (specs_database.txt) header['docId'] = '1 DOC ID' # Name of the item's", "import os # reads data from path def readFile(path): fh = open(path, 'r',", "data[delimiter+'header'+delimiter] = header # store the header for future use doubles = {}", "not os.path.isfile(os.path.join(source, file)): haserror = True print(\"❌ Item id '\"+i+\"' links to '\"+file+\"',", "all items in docIDs and make sure they're unique seen = set() duplicates", "'1 DOC ID' # Name of the item's image header['itemImage'] = '6 icon", "'.join(missingDocs)) print(\" Items that are uncraftable by design can be added to the", "continue # Ignore empty lines id = line[identifierIndex] if id in data: #", "a document # - Check if all documents point to an existing item", "\"644\", \"645\", \"646\", \"647\", \"648\", \"649\", # Life Support - Thermal \"660\", \"661\",", "No duplicate, add the line. data[id] = line if len(doubles) > 0: for", "else: # No duplicate, add the line. 
data[id] = line if len(doubles) >", "\"2000\", \"111\", \"115\", \"112\", # Materials \"121\", \"117\", \"116\", \"124\", \"119\", \"123\", \"120\",", "script will warn) # - Warns when an item doesn't have a doc", "else: print(\"✔️ All documents have an existing item attached.\") def checkFileLinks(data, header): headerIdentifier", "Check if all items have a document # - Check if all documents", "\"2035\", \"2036\", \"2037\", \"2038\", \"2039\", # Components - Class H \"2040\", \"2041\", \"2042\",", "= cleanLine(line, '\\t ', ';') if line == \"\": continue # Ignore empty", "this in an update. # Unique sorting key of items (items_database.txt) header['itemId'] =", "has en entry in the column header[docItemId] def sanityCheck(items, itemHeader, docs, docsHeader): itemHeaderIdentifier", "# Trade Goods \"333\", \"341\", \"342\", \"340\", \"343\", \"303\", \"304\", \"305\", \"322\", \"324\",", "Components - Class D \"219\", \"2001\", \"2002\", \"2003\", \"2004\", \"2005\", \"2006\", \"2007\", \"2008\",", "but the item does not exist: \"+', '.join(missingItems)) print(\"------------------------------\") else: print(\"✔️ All documents", "== delimiter+'header'+delimiter: continue duplicates[id] = 2 if id not in duplicates else duplicates[id]", "-1: print(\"🛑 couldn't locate '\"+identifier+\"' in '\"+source+\"'\") quit() # Parse the items, stored", "in itemIDs itemSet = set(itemIDs) missingItems = [x for x in docIDs if", "# - Check if all items have a document # - Check if", "G \"2030\", \"2031\", \"2032\", \"2033\", \"2034\", \"2035\", \"2036\", \"2037\", \"2038\", \"2039\", # Components", "files in column '\"+header+\"' exist.\") if __name__ == \"__main__\": itemData = parseFile(itemfile, header[\"itemId\"])", "data2.keys(): if id == delimiter+'header'+delimiter: continue duplicates[id] = 2 if id not in", "Let's go over all items in docIDs and make sure they're unique seen", "Item id '\"+i+\"' links to '\"+file+\"', which doesn't exists.\") if not haserror: print(\"✔️", 
"comments line = cleanLine(line, '\\t ', ';') if line == \"\": continue #", "data2): duplicates = {} for id in data1.keys() & data2.keys(): if id ==", "# Components - Class G \"2030\", \"2031\", \"2032\", \"2033\", \"2034\", \"2035\", \"2036\", \"2037\",", "store the header for future use doubles = {} # stores the ID", "from the header[list] def getIdentifierIndex(header, identifier): if identifier not in header: return -1", "Resources - Loot \"29\", \"100\", \"101\", \"103\", \"114\", \"102\", \"104\", \"109\", \"118\", \"113\",", "start of a file def getHeader(data): for idx, line in enumerate(data): if line[:2]", "path def writeFile(path, dataList): fh = open(path, 'w', encoding='utf8', newline='') for line in", "IDs that don't have a crafting document ignoreUncraftable = [ \"1\", \"2\", \"3\",", "\"+path) # Takes a string and returns a list def cleanLine(line, strip, delim):", "duplicate keys in: \"+file) return data def composeCsv(data, target): lines = [] for", "Goods \"600\", \"601\", \"602\", \"603\", \"604\", \"605\", \"606\", \"607\", \"608\", \"609\", # Life", "# Unique sorting key of documents (specs_database.txt) header['docId'] = '1 DOC ID' #", "\"9\", \"10\", # Resources - Raw \"11\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\",", "header[itemId] has en entry in the column header[docItemId] def sanityCheck(items, itemHeader, docs, docsHeader):", "by insertion. joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) # Check itemData and docData", "image header['itemImage'] = '6 icon image' # Name of the document's image header['docImage']", "in line.split(delim)] # Finds the header, which is the last commented line at", "file)): haserror = True print(\"❌ Item id '\"+i+\"' links to '\"+file+\"', which doesn't", "to change this, unless Momo changes this in an update. 
# Unique sorting", "item ID that a doc would craft: header['docItemId'] = '9 CRAFTS ID' ###", "if line == \"\": return line if line[-1] == delim: line = line[0:-1]", "in dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished writing: \"+path) # Takes a string and", "= set(ignoreUncraftable) missingDocs = [x for x in itemIDs if x not in", "# Life Support - Water \"640\", \"641\", \"642\", \"643\", \"644\", \"645\", \"646\", \"647\",", "\"2026\", \"2027\", \"2028\", \"2029\", # Components - Class G \"2030\", \"2031\", \"2032\", \"2033\",", "Components - Class G \"2030\", \"2031\", \"2032\", \"2033\", \"2034\", \"2035\", \"2036\", \"2037\", \"2038\",", "in docIDs docSet = set(docIDs) ignoreSet = set(ignoreUncraftable) missingDocs = [x for x", "\"114\", \"102\", \"104\", \"109\", \"118\", \"113\", # Materials \"105\", \"106\", \"107\", \"108\", \"110\"", "\"2001\", \"2002\", \"2003\", \"2004\", \"2005\", \"2006\", \"2007\", \"2008\", \"2009\", # Components - Class", "The item ID that a doc would craft: header['docItemId'] = '9 CRAFTS ID'", "\"102\", \"104\", \"109\", \"118\", \"113\", # Materials \"105\", \"106\", \"107\", \"108\", \"110\" ,", "unique item.\") # We have 2 lists of IDs, find the IDs from", "if len(duplicates) > 0: print(\"❌ The following item ID(s) have more than one", "if i == delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go over all items in", "- Orphaned documents # Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/'", "\"646\", \"647\", \"648\", \"649\", # Life Support - Thermal \"660\", \"661\", \"662\", \"663\",", "c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile = 'items_database.txt' docfile = 'specs_database.txt' #", "\"692\", \"674\", \"675\", \"693\", # Consumables \"676\", \"677\", \"700\", 
\"678\", \"679\", \"701\", \"680\",", "= line if len(doubles) > 0: for id in doubles: print(\"❌ The unique", "continue file = data[i][headerIdentifier] if not os.path.isfile(os.path.join(source, file)): haserror = True print(\"❌ Item", "'\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) # Check itemData and docData for duplicate IDs def", "\"159\", \"165\", \"166\", \"167\", \"169\", # Components - Class B \"170\", \"200\", \"201\",", "\"153\", \"164\", \"155\", \"156\", \"157\", \"158\", \"168\", # Components - Class A \"160\",", "the index of the identifier from the header[list] def getIdentifierIndex(header, identifier): if identifier", "header, which is the last commented line at the start of a file", "writeFile(target, lines) # Check itemData and docData for duplicate IDs def findDuplicateEntries(fn1, data1,", "0: print(\"❌ The following item ID(s) do not have a crafting document: \"+',", "needs to be unique (the script will warn) # - Warns when an", "en entry in the column header[docItemId] def sanityCheck(items, itemHeader, docs, docsHeader): itemHeaderIdentifier =", "that are duplicates for line in lines: if line[:2] == '//': continue #", "\"29\", \"100\", \"101\", \"103\", \"114\", \"102\", \"104\", \"109\", \"118\", \"113\", # Materials \"105\",", "\"107\", \"108\", \"110\" , \"2000\", \"111\", \"115\", \"112\", # Materials \"121\", \"117\", \"116\",", "IDs, find the IDs from itemIDS that are missing in docIDs docSet =", "\"202\", \"203\", \"204\", \"205\", \"206\", \"207\", \"208\", # Components - Class C \"209\",", "# Name of the item's image header['itemImage'] = '6 icon image' # Name", "unique seen = set() duplicates = [x for x in docIDs if x", "itemData = parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData = parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv') #", "Support - Waste \"690\", \"670\", \"671\", \"691\", \"672\", \"673\", \"692\", 
\"674\", \"675\", \"693\",", "= 2 if id not in doubles else doubles[id] + 1 else: #", "\"123\", \"120\", \"122\", # Materials \"150\", \"151\", \"152\", \"153\", \"164\", \"155\", \"156\", \"157\",", "\"406\", \"407\", \"408\", # Trade Goods \"600\", \"601\", \"602\", \"603\", \"604\", \"605\", \"606\",", "\"158\", \"168\", # Components - Class A \"160\", \"161\", \"162\", \"163\", \"154\", \"159\",", "# Life Support - Thermal \"660\", \"661\", \"662\", \"663\", \"664\", \"665\", \"666\", \"667\",", "# Life Support - Waste \"690\", \"670\", \"671\", \"691\", \"672\", \"673\", \"692\", \"674\",", "duplicates else duplicates[id] + 1 if len(duplicates) > 0: for id in duplicates:", "\"170\", \"200\", \"201\", \"202\", \"203\", \"204\", \"205\", \"206\", \"207\", \"208\", # Components -", "# Takes a string and returns a list def cleanLine(line, strip, delim): line", "Class F \"2020\", \"2021\", \"2022\", \"2023\", \"2024\", \"2025\", \"2026\", \"2027\", \"2028\", \"2029\", #", "were found. The script will only use the first match per duplicate.\") print(\"------------------------------\")", "https://github.com/Katorone/Astrox-Imperium # This script exports 2 files to a csv: # - MOD/items/items_database.txt", "data: if i == delimiter+'header'+delimiter: continue file = data[i][headerIdentifier] if not os.path.isfile(os.path.join(source, file)):", "\"681\", \"710\", \"711\", # Consumables \"712\", \"702\", \"703\", \"735\", \"736\", \"737\", \"738\", #", "\"737\", \"738\", # Consumables ] ## These settings tell the script which title", "\"2041\", \"2042\", \"2043\", \"2044\", \"2045\", \"2046\", \"2047\", \"2048\", \"2049\", # Components - Class", "def sanityCheck(items, itemHeader, docs, docsHeader): itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier == -1:", "which is the last commented line at the start of a file def", "-1: print(\"🛑 couldn't locate '\"+header+\"' in checkFileLinks(), unable to continue 
sanity check.\") return", "to the 'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️ All items have a crafting", "\"104\", \"109\", \"118\", \"113\", # Materials \"105\", \"106\", \"107\", \"108\", \"110\" , \"2000\",", "header[docItemId] def sanityCheck(items, itemHeader, docs, docsHeader): itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier ==", "keys in: \"+file) return data def composeCsv(data, target): lines = [] for item", "than one crafting document: \"+', '.join(duplicates)) print(\"------------------------------\") else: print(\"✔️ All documents point to", "'//': return data[idx-1][2:] # Gets the index of the identifier from the header[list]", "to use in the exported csv delimiter = ';' # List of item", "if id == delimiter+'header'+delimiter: continue duplicates[id] = 2 if id not in duplicates", "couldn't locate '\"+docsHeader+\"' in findMissing(), unable to continue sanity check.\") return itemIDs =", "missingItems = [x for x in docIDs if x not in itemSet] if", "readFile(os.path.join(source, file)) header = cleanLine(getHeader(lines), '\\t ', ';') identifierIndex = getIdentifierIndex(header, identifier) if", "continue # Ignore comments line = cleanLine(line, '\\t ', ';') if line ==", "print(\"❌ The following item ID(s) have more than one crafting document: \"+', '.join(duplicates))", "\"155\", \"156\", \"157\", \"158\", \"168\", # Components - Class A \"160\", \"161\", \"162\",", "= '1 ITEM ID' # Unique sorting key of documents (specs_database.txt) header['docId'] =", "\"+fn1+\" and \"+fn2+\".\") # Checks that the column header[itemId] has en entry in", "sanity check.\") return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier == -1: print(\"🛑 couldn't", "lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) # Check itemData and docData for duplicate IDs def 
findDuplicateEntries(fn1,", "doubles[id] + 1 else: # No duplicate, add the line. data[id] = line", "in itemSet] if len(missingItems) > 0: print(\"❌ The following item ID(s) have a", "- Check if all documents point to an existing item # - Check", "= line[identifierIndex] if id in data: # Duplicate checking doubles[id] = 2 if", "- Class C \"209\", \"210\", \"211\", \"212\", \"213\", \"214\", \"215\", \"216\", \"217\", \"218\",", "if all items have a document # - Check if all documents point", "Class J \"2080\", \"2081\", \"2082\", \"400\", \"401\", \"402\", # Components - Class M", "This script exports 2 files to a csv: # - MOD/items/items_database.txt -> itemdb.csv", "csv will only contain the first match) # - Every ID between items", "check.\") return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier == -1: print(\"🛑 couldn't locate", "and make sure they're unique seen = set() duplicates = [x for x", "docfile, docData) # Sanity checks: # - Check if all items have a", "- Check if the .png for an item/doc exists # - Orphaned documents", "cleanLine(line, '\\t ', ';') if line == \"\": continue # Ignore empty lines", "\"643\", \"644\", \"645\", \"646\", \"647\", \"648\", \"649\", # Life Support - Thermal \"660\",", "\"23\", \"24\", \"25\", \"26\", \"27\", \"28\", # Resources - Loot \"29\", \"100\", \"101\",", "if x not in itemSet] if len(missingItems) > 0: print(\"❌ The following item", "else: print(\"✔️ All items have a crafting document attached (with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\")", "duplicates = [x for x in docIDs if x in seen or seen.add(x)]", "can be added to the 'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️ All items", "files to a csv: # - MOD/items/items_database.txt -> itemdb.csv # - MOD/items/specs_database.txt ->", "craft: header['docItemId'] = '9 CRAFTS ID' ### End of configuration ### ### Code", "insertion. 
joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) # Check itemData and docData for", "first match) # - Every ID between items and documents needs to be", "### ### Code starts here ### import os # reads data from path", "the start of a file def getHeader(data): for idx, line in enumerate(data): if", "not in itemSet] if len(missingItems) > 0: print(\"❌ The following item ID(s) have", "were no duplicate keys in: \"+file) return data def composeCsv(data, target): lines =", "\"2036\", \"2037\", \"2038\", \"2039\", # Components - Class H \"2040\", \"2041\", \"2042\", \"2043\",", "findMissing(), unable to continue sanity check.\") return itemIDs = [] for i in", "x not in ignoreSet] if len(missingDocs) > 0: print(\"❌ The following item ID(s)", "unique (the script will warn) # - Warns when an item doesn't have", "\"\": return line if line[-1] == delim: line = line[0:-1] return [x.strip(strip) for", "Components - Class B \"170\", \"200\", \"201\", \"202\", \"203\", \"204\", \"205\", \"206\", \"207\",", "# Components - Class A \"160\", \"161\", \"162\", \"163\", \"154\", \"159\", \"165\", \"166\",", "# Components - Class J \"2080\", \"2081\", \"2082\", \"400\", \"401\", \"402\", # Components", "the last commented line at the start of a file def getHeader(data): for", "continue sanity check.\") return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier == -1: print(\"🛑", "\"11\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \"27\", \"28\", # Resources -", "list of data to path def writeFile(path, dataList): fh = open(path, 'w', encoding='utf8',", "\"120\", \"122\", # Materials \"150\", \"151\", \"152\", \"153\", \"164\", \"155\", \"156\", \"157\", \"158\",", "Check itemData and docData for duplicate IDs def findDuplicateEntries(fn1, data1, fn2, data2): duplicates", "ID between items and documents needs to be unique (the script will warn)", "= [] for i in 
items: if i == delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs", "= getIdentifierIndex(header, identifier) if identifierIndex == -1: print(\"🛑 couldn't locate '\"+identifier+\"' in '\"+source+\"'\")", "id '\"+i+\"' links to '\"+file+\"', which doesn't exists.\") if not haserror: print(\"✔️ All", "- Food \"620\", \"621\", \"622\", \"623\", \"624\", \"625\", \"626\", \"627\", \"628\", \"629\", #", "header['itemImage'] = '6 icon image' # Name of the document's image header['docImage'] =", "docsHeader) if docsHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+docsHeader+\"' in findMissing(), unable to", "from path def readFile(path): fh = open(path, 'r', encoding='utf8', newline='\\n') data = fh.readlines()", "x in line.split(delim)] # Finds the header, which is the last commented line", "if id not in duplicates else duplicates[id] + 1 if len(duplicates) > 0:", "header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier == -1: print(\"🛑 couldn't locate '\"+header+\"'", "\"205\", \"206\", \"207\", \"208\", # Components - Class C \"209\", \"210\", \"211\", \"212\",", "data1.keys() & data2.keys(): if id == delimiter+'header'+delimiter: continue duplicates[id] = 2 if id", "in itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️ All items have a crafting document attached (with", "ITEM ID' # Unique sorting key of documents (specs_database.txt) header['docId'] = '1 DOC", "= False for i in data: if i == delimiter+'header'+delimiter: continue file =", "a crafting document: \"+', '.join(missingDocs)) print(\" Items that are uncraftable by design can", "existing item attached.\") def checkFileLinks(data, header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier ==", "header: return -1 return header.index(identifier) def parseFile(file, identifier, ): lines = readFile(os.path.join(source, file))", 
"documents point to a unique item sanityCheck(itemData, header[\"itemId\"], docData, header[\"docItemId\"]) # Check if", "document # - Check if all documents point to an existing item #", "this, unless Momo changes this in an update. # Unique sorting key of", "Every ID between items and documents needs to be unique (the script will", "= True print(\"❌ Item id '\"+i+\"' links to '\"+file+\"', which doesn't exists.\") if", "some sanity checking, which should be useful for mod & modpack creators: #", "# Consumables \"712\", \"702\", \"703\", \"735\", \"736\", \"737\", \"738\", # Consumables ] ##", "] ## These settings tell the script which title it needs to look", "= parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv') # Check itemData and docData for duplicate IDs", "\"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"2018\", \"2019\", # Components - Class F", "dictionary-type, which is guarantueed to be ordered by insertion. joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"')", "data from path def readFile(path): fh = open(path, 'r', encoding='utf8', newline='\\n') data =", "#!/usr/bin/python3 # More information, as well as the (non)licence can be found at:", "image header['docImage'] = '6 doc image' # The item ID that a doc", "\"606\", \"607\", \"608\", \"609\", # Life Support - Food \"620\", \"621\", \"622\", \"623\",", "continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go over all items in docIDs and make sure", "uncraftable by design can be added to the 'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\") else:", "line == \"\": return line if line[-1] == delim: line = line[0:-1] return", "print(\" Items that are uncraftable by design can be added to the 'ignoreUncraftable'-list", "docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+docsHeader+\"' in", "== 
\"__main__\": itemData = parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData = parseFile(docfile, header[\"docId\"]) composeCsv(docData,", "source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile = 'items_database.txt' docfile = 'specs_database.txt' # Delimiter", "Class M \"302\", \"300\", \"301\", \"351\", \"353\", \"350\", \"352\", \"330\", \"332\", \"331\", #", "lines id = line[identifierIndex] if id in data: # Duplicate checking doubles[id] =", "i in data: if i == delimiter+'header'+delimiter: continue file = data[i][headerIdentifier] if not", "\"2017\", \"2018\", \"2019\", # Components - Class F \"2020\", \"2021\", \"2022\", \"2023\", \"2024\",", "sanity check.\") return haserror = False for i in data: if i ==", "Life Support - Waste \"690\", \"670\", \"671\", \"691\", \"672\", \"673\", \"692\", \"674\", \"675\",", "\"607\", \"608\", \"609\", # Life Support - Food \"620\", \"621\", \"622\", \"623\", \"624\",", "[x.strip(strip) for x in line.split(delim)] # Finds the header, which is the last", "if line[:2] == '//': continue # Ignore comments line = cleanLine(line, '\\t ',", "to path def writeFile(path, dataList): fh = open(path, 'w', encoding='utf8', newline='') for line", "\"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"2018\", \"2019\", # Components - Class", "\"669\", # Life Support - Waste \"690\", \"670\", \"671\", \"691\", \"672\", \"673\", \"692\",", "CRAFTS ID' ### End of configuration ### ### Code starts here ### import", "\"666\", \"667\", \"668\", \"669\", # Life Support - Waste \"690\", \"670\", \"671\", \"691\",", "line at the start of a file def getHeader(data): for idx, line in", "data is a dictionary-type, which is guarantueed to be ordered by insertion. 
joiner", "= '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) # Check itemData and docData for duplicate IDs", "newline='\\n') data = fh.readlines() fh.close() return data # Writes a list of data", "doubles = {} # stores the ID that are duplicates for line in", "\"103\", \"114\", \"102\", \"104\", \"109\", \"118\", \"113\", # Materials \"105\", \"106\", \"107\", \"108\",", "\"209\", \"210\", \"211\", \"212\", \"213\", \"214\", \"215\", \"216\", \"217\", \"218\", # Components -", "return haserror = False for i in data: if i == delimiter+'header'+delimiter: continue", "in docIDs if x not in itemSet] if len(missingItems) > 0: print(\"❌ The", "itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = [] for i in docs: if i == delimiter+'header'+delimiter: continue", "__name__ == \"__main__\": itemData = parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData = parseFile(docfile, header[\"docId\"])", "# - Check if the .png for an item/doc exists # - Orphaned", "> 0: print(\"❌ The following item ID(s) have more than one crafting document:", "id in data1.keys() & data2.keys(): if id == delimiter+'header'+delimiter: continue duplicates[id] = 2", "'1 ITEM ID' # Unique sorting key of documents (specs_database.txt) header['docId'] = '1", "i == delimiter+'header'+delimiter: continue file = data[i][headerIdentifier] if not os.path.isfile(os.path.join(source, file)): haserror =", "More information, as well as the (non)licence can be found at: https://github.com/Katorone/Astrox-Imperium #", "column '\"+header+\"' exist.\") if __name__ == \"__main__\": itemData = parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv')", "item attached.\") def checkFileLinks(data, header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier == -1:", "\"200\", \"201\", \"202\", \"203\", \"204\", \"205\", \"206\", \"207\", \"208\", # Components - 
Class", "won't need to change this, unless Momo changes this in an update. #", "# Parse the items, stored as item[id] data = {} data[delimiter+'header'+delimiter] = header", "in enumerate(data): if line[:2] != '//': return data[idx-1][2:] # Gets the index of", "= set() duplicates = [x for x in docIDs if x in seen", "when examining data. header = {} # You probably won't need to change", "line = line.strip() if line == \"\": return line if line[-1] == delim:", "(non)licence can be found at: https://github.com/Katorone/Astrox-Imperium # This script exports 2 files to", "delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go over all items in docIDs and make", "the document's image header['docImage'] = '6 doc image' # The item ID that", "\"702\", \"703\", \"735\", \"736\", \"737\", \"738\", # Consumables ] ## These settings tell", "\"2019\", # Components - Class F \"2020\", \"2021\", \"2022\", \"2023\", \"2024\", \"2025\", \"2026\",", "\"703\", \"735\", \"736\", \"737\", \"738\", # Consumables ] ## These settings tell the", "H \"2040\", \"2041\", \"2042\", \"2043\", \"2044\", \"2045\", \"2046\", \"2047\", \"2048\", \"2049\", # Components", "\"405\", \"406\", \"407\", \"408\", # Trade Goods \"600\", \"601\", \"602\", \"603\", \"604\", \"605\",", "data[idx-1][2:] # Gets the index of the identifier from the header[list] def getIdentifierIndex(header,", "crafting document: \"+', '.join(missingDocs)) print(\" Items that are uncraftable by design can be", "\"2033\", \"2034\", \"2035\", \"2036\", \"2037\", \"2038\", \"2039\", # Components - Class H \"2040\",", "per duplicate.\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys in: \"+file) return", "in an update. 
# Unique sorting key of items (items_database.txt) header['itemId'] = '1", "for x in docIDs if x in seen or seen.add(x)] if len(duplicates) >", "to a csv: # - MOD/items/items_database.txt -> itemdb.csv # - MOD/items/specs_database.txt -> docdb.csv", "Imperium/Astrox Imperium_Data/MOD/items/' itemfile = 'items_database.txt' docfile = 'specs_database.txt' # Delimiter to use in", "findMissing(), unable to continue sanity check.\") return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier", "Class C \"209\", \"210\", \"211\", \"212\", \"213\", \"214\", \"215\", \"216\", \"217\", \"218\", #", "### import os # reads data from path def readFile(path): fh = open(path,", "to continue sanity check.\") return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier == -1:", "Orphaned documents # Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile", "identifier '\"+id+\"' matched \"+str(doubles[id])+\" different lines.\") print(\"❌ Duplicates were found. The script will", "doesn't have a doc for crafting # - Check if the .png for", "'\"+id+\"' matched \"+str(duplicates[id])+\" times in \"+fn1+\" and \"+fn2+\".\") print(\"❌ Duplicate IDs were found", "x in docIDs if x in seen or seen.add(x)] if len(duplicates) > 0:", "not in duplicates else duplicates[id] + 1 if len(duplicates) > 0: for id", "if i == delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = [] for i in docs:", "ID' # Name of the item's image header['itemImage'] = '6 icon image' #", "x not in itemSet] if len(missingItems) > 0: print(\"❌ The following item ID(s)", "data. 
header = {} # You probably won't need to change this, unless", "identifier, ): lines = readFile(os.path.join(source, file)) header = cleanLine(getHeader(lines), '\\t ', ';') identifierIndex", "def cleanLine(line, strip, delim): line = line.strip() if line == \"\": return line", "\"2015\", \"2016\", \"2017\", \"2018\", \"2019\", # Components - Class F \"2020\", \"2021\", \"2022\",", "Check if the .png for an item/doc exists checkFileLinks(itemData, header[\"itemImage\"]) checkFileLinks(docData, header[\"docImage\"]) print(\"\")", "\"407\", \"408\", # Trade Goods \"600\", \"601\", \"602\", \"603\", \"604\", \"605\", \"606\", \"607\",", "def checkFileLinks(data, header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier == -1: print(\"🛑 couldn't", "ID(s) have more than one crafting document: \"+', '.join(duplicates)) print(\"------------------------------\") else: print(\"✔️ All", "item in data: # data is a dictionary-type, which is guarantueed to be", "== delimiter+'header'+delimiter: continue file = data[i][headerIdentifier] if not os.path.isfile(os.path.join(source, file)): haserror = True", "2 if id not in duplicates else duplicates[id] + 1 if len(duplicates) >", "'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️ All items have a crafting document attached", "following item ID(s) have more than one crafting document: \"+', '.join(duplicates)) print(\"------------------------------\") else:", "fh.close() print(\"✔️ Finished writing: \"+path) # Takes a string and returns a list", "in doubles: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(doubles[id])+\" different lines.\") print(\"❌ Duplicates", "no duplicate keys across: \"+fn1+\" and \"+fn2+\".\") # Checks that the column header[itemId]", ".png for an item/doc exists # - Orphaned documents # Example for windows:", "from itemIDS that are missing in docIDs docSet = set(docIDs) ignoreSet = 
set(ignoreUncraftable)", "I \"2050\", \"2051\", \"2052\", \"2053\", \"2054\", \"2055\", \"2056\", \"2057\", \"2058\", \"2059\", # Components", "make sure they're unique seen = set() duplicates = [x for x in", "# Components - Class D \"219\", \"2001\", \"2002\", \"2003\", \"2004\", \"2005\", \"2006\", \"2007\",", "stored as item[id] data = {} data[delimiter+'header'+delimiter] = header # store the header", "# We have 2 lists of IDs, find the IDs from itemIDS that", "B \"170\", \"200\", \"201\", \"202\", \"203\", \"204\", \"205\", \"206\", \"207\", \"208\", # Components", "# Check if the .png for an item/doc exists checkFileLinks(itemData, header[\"itemImage\"]) checkFileLinks(docData, header[\"docImage\"])", "= '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile = 'items_database.txt' docfile = 'specs_database.txt' # Delimiter to", "The script will only use the first match per duplicate.\") print(\"------------------------------\") else: print(\"✔️", "docfile = 'specs_database.txt' # Delimiter to use in the exported csv delimiter =", "a crafting document ignoreUncraftable = [ \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\",", "# Life Support - Food \"620\", \"621\", \"622\", \"623\", \"624\", \"625\", \"626\", \"627\",", "ID that are duplicates for line in lines: if line[:2] == '//': continue", "C \"209\", \"210\", \"211\", \"212\", \"213\", \"214\", \"215\", \"216\", \"217\", \"218\", # Components", "# No duplicate, add the line. 
data[id] = line if len(doubles) > 0:", "item.\") # We have 2 lists of IDs, find the IDs from itemIDS", "getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+docsHeader+\"' in findMissing(), unable", "times in \"+fn1+\" and \"+fn2+\".\") print(\"❌ Duplicate IDs were found across \"+fn1+\" and", "script exports 2 files to a csv: # - MOD/items/items_database.txt -> itemdb.csv #", "= '1 DOC ID' # Name of the item's image header['itemImage'] = '6", "Class E \"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"2018\", \"2019\", #", "'\"+file+\"', which doesn't exists.\") if not haserror: print(\"✔️ All files in column '\"+header+\"'", "documents have an existing item attached.\") def checkFileLinks(data, header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header)", "\"117\", \"116\", \"124\", \"119\", \"123\", \"120\", \"122\", # Materials \"150\", \"151\", \"152\", \"153\",", "\"101\", \"103\", \"114\", \"102\", \"104\", \"109\", \"118\", \"113\", # Materials \"105\", \"106\", \"107\",", "\"677\", \"700\", \"678\", \"679\", \"701\", \"680\", \"681\", \"710\", \"711\", # Consumables \"712\", \"702\",", "found across \"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys", "# Sanity checks: # - Check if all items have a document #", "\"678\", \"679\", \"701\", \"680\", \"681\", \"710\", \"711\", # Consumables \"712\", \"702\", \"703\", \"735\",", "\"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys across: \"+fn1+\"", "\"105\", \"106\", \"107\", \"108\", \"110\" , \"2000\", \"111\", \"115\", \"112\", # Materials \"121\",", "# Resources - Loot \"29\", \"100\", \"101\", \"103\", \"114\", \"102\", \"104\", \"109\", \"118\",", "# - MOD/items/specs_database.txt -> docdb.csv # It will also do some sanity checking,", 
"Consumables ] ## These settings tell the script which title it needs to", "\"675\", \"693\", # Consumables \"676\", \"677\", \"700\", \"678\", \"679\", \"701\", \"680\", \"681\", \"710\",", "- Thermal \"660\", \"661\", \"662\", \"663\", \"664\", \"665\", \"666\", \"667\", \"668\", \"669\", #", "lines: if line[:2] == '//': continue # Ignore comments line = cleanLine(line, '\\t", "identifier) if identifierIndex == -1: print(\"🛑 couldn't locate '\"+identifier+\"' in '\"+source+\"'\") quit() #", "and documents needs to be unique (the script will warn) # - Warns", "print(\"✔️ Finished writing: \"+path) # Takes a string and returns a list def", "\"151\", \"152\", \"153\", \"164\", \"155\", \"156\", \"157\", \"158\", \"168\", # Components - Class", "\"24\", \"25\", \"26\", \"27\", \"28\", # Resources - Loot \"29\", \"100\", \"101\", \"103\",", "for x in line.split(delim)] # Finds the header, which is the last commented", "line[:2] == '//': continue # Ignore comments line = cleanLine(line, '\\t ', ';')", "\"330\", \"332\", \"331\", # Trade Goods \"333\", \"341\", \"342\", \"340\", \"343\", \"303\", \"304\",", "\"215\", \"216\", \"217\", \"218\", # Components - Class D \"219\", \"2001\", \"2002\", \"2003\",", "== \"\": continue # Ignore empty lines id = line[identifierIndex] if id in", "'\"+id+\"' matched \"+str(doubles[id])+\" different lines.\") print(\"❌ Duplicates were found. 
The script will only", "2 lists of IDs, find the IDs from itemIDS that are missing in", "header['docImage'] = '6 doc image' # The item ID that a doc would", "lines = readFile(os.path.join(source, file)) header = cleanLine(getHeader(lines), '\\t ', ';') identifierIndex = getIdentifierIndex(header,", "\"608\", \"609\", # Life Support - Food \"620\", \"621\", \"622\", \"623\", \"624\", \"625\",", "\"2054\", \"2055\", \"2056\", \"2057\", \"2058\", \"2059\", # Components - Class J \"2080\", \"2081\",", "itemData and docData for duplicate IDs def findDuplicateEntries(fn1, data1, fn2, data2): duplicates =", "it needs to look for when examining data. header = {} # You", "identifier from the header[list] def getIdentifierIndex(header, identifier): if identifier not in header: return", "line in lines: if line[:2] == '//': continue # Ignore comments line =", "by design can be added to the 'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️", "= 2 if id not in duplicates else duplicates[id] + 1 if len(duplicates)", "Resources - Raw \"11\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \"27\", \"28\",", "'\"+source+\"'\") quit() # Parse the items, stored as item[id] data = {} data[delimiter+'header'+delimiter]", "\"627\", \"628\", \"629\", # Life Support - Water \"640\", \"641\", \"642\", \"643\", \"644\",", "duplicate.\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys in: \"+file) return data", "# Components - Class H \"2040\", \"2041\", \"2042\", \"2043\", \"2044\", \"2045\", \"2046\", \"2047\",", "\"+fn2+\".\") # Checks that the column header[itemId] has en entry in the column", "\"+fn1+\" and \"+fn2+\".\") print(\"❌ Duplicate IDs were found across \"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\")", "attached (with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") # For the orphaned check, we find docIDs", "match) # - Every ID between items 
and documents needs to be unique", "# Ignore comments line = cleanLine(line, '\\t ', ';') if line == \"\":", "'\"+i+\"' links to '\"+file+\"', which doesn't exists.\") if not haserror: print(\"✔️ All files", "that don't have a crafting document ignoreUncraftable = [ \"1\", \"2\", \"3\", \"4\",", "\"2030\", \"2031\", \"2032\", \"2033\", \"2034\", \"2035\", \"2036\", \"2037\", \"2038\", \"2039\", # Components -", "[x for x in docIDs if x in seen or seen.add(x)] if len(duplicates)", "unique identifier '\"+id+\"' matched \"+str(duplicates[id])+\" times in \"+fn1+\" and \"+fn2+\".\") print(\"❌ Duplicate IDs", "entry in the column header[docItemId] def sanityCheck(items, itemHeader, docs, docsHeader): itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter],", "\"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \"27\", \"28\", # Resources - Loot \"29\",", "Goods \"320\", \"321\", \"323\", \"325\", \"311\", \"310\", \"312\", \"313\", \"403\", \"404\", # Trade", "crafting # - Check if the .png for an item/doc exists # -", "& modpack creators: # - Each file can only contain unique IDs (the", "\"690\", \"670\", \"671\", \"691\", \"672\", \"673\", \"692\", \"674\", \"675\", \"693\", # Consumables \"676\",", "in doubles else doubles[id] + 1 else: # No duplicate, add the line.", "\"2042\", \"2043\", \"2044\", \"2045\", \"2046\", \"2047\", \"2048\", \"2049\", # Components - Class I", "Check if the .png for an item/doc exists # - Orphaned documents #", "at the start of a file def getHeader(data): for idx, line in enumerate(data):", "return itemIDs = [] for i in items: if i == delimiter+'header'+delimiter: continue", "2 files to a csv: # - MOD/items/items_database.txt -> itemdb.csv # - MOD/items/specs_database.txt", "use doubles = {} # stores the ID that are duplicates for line", "# Writes a list of data to path def writeFile(path, dataList): fh =", "\"+', '.join(missingItems)) print(\"------------------------------\") else: print(\"✔️ All documents have an 
existing item attached.\") def", "do not have a crafting document: \"+', '.join(missingDocs)) print(\" Items that are uncraftable", "a string and returns a list def cleanLine(line, strip, delim): line = line.strip()", "one crafting document: \"+', '.join(duplicates)) print(\"------------------------------\") else: print(\"✔️ All documents point to a", "an existing item # - Check if all documents point to a unique", "\"2040\", \"2041\", \"2042\", \"2043\", \"2044\", \"2045\", \"2046\", \"2047\", \"2048\", \"2049\", # Components -", "documents point to a unique item.\") # We have 2 lists of IDs,", "Components - Class F \"2020\", \"2021\", \"2022\", \"2023\", \"2024\", \"2025\", \"2026\", \"2027\", \"2028\",", "for i in items: if i == delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = []", "\"2020\", \"2021\", \"2022\", \"2023\", \"2024\", \"2025\", \"2026\", \"2027\", \"2028\", \"2029\", # Components -", "IDs findDuplicateEntries(itemfile, itemData, docfile, docData) # Sanity checks: # - Check if all", "F \"2020\", \"2021\", \"2022\", \"2023\", \"2024\", \"2025\", \"2026\", \"2027\", \"2028\", \"2029\", # Components", "\"671\", \"691\", \"672\", \"673\", \"692\", \"674\", \"675\", \"693\", # Consumables \"676\", \"677\", \"700\",", "print(\"✔️ All documents have an existing item attached.\") def checkFileLinks(data, header): headerIdentifier =", "\"404\", # Trade Goods \"405\", \"406\", \"407\", \"408\", # Trade Goods \"600\", \"601\",", "line[identifierIndex] if id in data: # Duplicate checking doubles[id] = 2 if id", "= line[0:-1] return [x.strip(strip) for x in line.split(delim)] # Finds the header, which", "if len(missingDocs) > 0: print(\"❌ The following item ID(s) do not have a", "only use the first match per duplicate.\") print(\"------------------------------\") else: print(\"✔️ There were no", "the column header[docItemId] def sanityCheck(items, itemHeader, docs, docsHeader): itemHeaderIdentifier = 
getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if", "[] for item in data: # data is a dictionary-type, which is guarantueed", "which is guarantueed to be ordered by insertion. joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target,", "\"603\", \"604\", \"605\", \"606\", \"607\", \"608\", \"609\", # Life Support - Food \"620\",", "a file def getHeader(data): for idx, line in enumerate(data): if line[:2] != '//':", "= readFile(os.path.join(source, file)) header = cleanLine(getHeader(lines), '\\t ', ';') identifierIndex = getIdentifierIndex(header, identifier)", "crafting document attached (with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") # For the orphaned check, we", "{} # stores the ID that are duplicates for line in lines: if", "if the .png for an item/doc exists # - Orphaned documents # Example", "\"605\", \"606\", \"607\", \"608\", \"609\", # Life Support - Food \"620\", \"621\", \"622\",", "writing: \"+path) # Takes a string and returns a list def cleanLine(line, strip,", "\"150\", \"151\", \"152\", \"153\", \"164\", \"155\", \"156\", \"157\", \"158\", \"168\", # Components -", "[] for i in docs: if i == delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's", "identifierIndex = getIdentifierIndex(header, identifier) if identifierIndex == -1: print(\"🛑 couldn't locate '\"+identifier+\"' in", "between items and documents needs to be unique (the script will warn) #", "header # store the header for future use doubles = {} # stores", "= {} # stores the ID that are duplicates for line in lines:", "identifier '\"+id+\"' matched \"+str(duplicates[id])+\" times in \"+fn1+\" and \"+fn2+\".\") print(\"❌ Duplicate IDs were", "\"2080\", \"2081\", \"2082\", \"400\", \"401\", \"402\", # Components - Class M \"302\", \"300\",", "to a unique item.\") # We have 2 lists of IDs, find the", "a unique item sanityCheck(itemData, header[\"itemId\"], docData, 
header[\"docItemId\"]) # Check if the .png for", "docsHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+docsHeader+\"' in findMissing(), unable to continue sanity", "\"2032\", \"2033\", \"2034\", \"2035\", \"2036\", \"2037\", \"2038\", \"2039\", # Components - Class H", "\"2047\", \"2048\", \"2049\", # Components - Class I \"2050\", \"2051\", \"2052\", \"2053\", \"2054\",", "\"736\", \"737\", \"738\", # Consumables ] ## These settings tell the script which", "key of documents (specs_database.txt) header['docId'] = '1 DOC ID' # Name of the", "# More information, as well as the (non)licence can be found at: https://github.com/Katorone/Astrox-Imperium", "data: # data is a dictionary-type, which is guarantueed to be ordered by", "lists of IDs, find the IDs from itemIDS that are missing in docIDs", "else: print(\"✔️ There were no duplicate keys across: \"+fn1+\" and \"+fn2+\".\") # Checks", "missing in itemIDs itemSet = set(itemIDs) missingItems = [x for x in docIDs", "\"110\" , \"2000\", \"111\", \"115\", \"112\", # Materials \"121\", \"117\", \"116\", \"124\", \"119\",", "to be ordered by insertion. 
joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) # Check", "joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) # Check itemData and docData for duplicate", "idx, line in enumerate(data): if line[:2] != '//': return data[idx-1][2:] # Gets the", "string and returns a list def cleanLine(line, strip, delim): line = line.strip() if", "print(\"🛑 couldn't locate '\"+identifier+\"' in '\"+source+\"'\") quit() # Parse the items, stored as", "document ignoreUncraftable = [ \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\",", "in header: return -1 return header.index(identifier) def parseFile(file, identifier, ): lines = readFile(os.path.join(source,", "\"2037\", \"2038\", \"2039\", # Components - Class H \"2040\", \"2041\", \"2042\", \"2043\", \"2044\",", "have a crafting document ignoreUncraftable = [ \"1\", \"2\", \"3\", \"4\", \"5\", \"6\",", "index of the identifier from the header[list] def getIdentifierIndex(header, identifier): if identifier not", "+ 1 else: # No duplicate, add the line. 
data[id] = line if", "useful for mod & modpack creators: # - Each file can only contain", "in seen or seen.add(x)] if len(duplicates) > 0: print(\"❌ The following item ID(s)", "# Components - Class C \"209\", \"210\", \"211\", \"212\", \"213\", \"214\", \"215\", \"216\",", "ignoreSet = set(ignoreUncraftable) missingDocs = [x for x in itemIDs if x not", "[] for i in items: if i == delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs =", "for i in docs: if i == delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go", "have 2 lists of IDs, find the IDs from itemIDS that are missing", "\"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", # Resources - Raw \"11\",", "readFile(path): fh = open(path, 'r', encoding='utf8', newline='\\n') data = fh.readlines() fh.close() return data", "in docs: if i == delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go over all", "Items that are uncraftable by design can be added to the 'ignoreUncraftable'-list in", "\"156\", \"157\", \"158\", \"168\", # Components - Class A \"160\", \"161\", \"162\", \"163\",", "line in enumerate(data): if line[:2] != '//': return data[idx-1][2:] # Gets the index", "# store the header for future use doubles = {} # stores the", "End of configuration ### ### Code starts here ### import os # reads", "not in doubles else doubles[id] + 1 else: # No duplicate, add the", "following item ID(s) have a crafting document, but the item does not exist:", "\"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \"27\", \"28\", # Resources - Loot", "\"691\", \"672\", \"673\", \"692\", \"674\", \"675\", \"693\", # Consumables \"676\", \"677\", \"700\", \"678\",", "that are missing in itemIDs itemSet = set(itemIDs) missingItems = [x for x", "in the column header[docItemId] def sanityCheck(items, itemHeader, docs, docsHeader): itemHeaderIdentifier = 
getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader)", "== -1: print(\"🛑 couldn't locate '\"+identifier+\"' in '\"+source+\"'\") quit() # Parse the items,", "delim: line = line[0:-1] return [x.strip(strip) for x in line.split(delim)] # Finds the", "of configuration ### ### Code starts here ### import os # reads data", "to continue sanity check.\") return itemIDs = [] for i in items: if", "fh.close() return data # Writes a list of data to path def writeFile(path,", "items, stored as item[id] data = {} data[delimiter+'header'+delimiter] = header # store the", "\"+str(doubles[id])+\" different lines.\") print(\"❌ Duplicates were found. The script will only use the", "for item in data: # data is a dictionary-type, which is guarantueed to", "Components - Class A \"160\", \"161\", \"162\", \"163\", \"154\", \"159\", \"165\", \"166\", \"167\",", "also do some sanity checking, which should be useful for mod & modpack", "\"2007\", \"2008\", \"2009\", # Components - Class E \"2010\", \"2011\", \"2012\", \"2013\", \"2014\",", "unique item sanityCheck(itemData, header[\"itemId\"], docData, header[\"docItemId\"]) # Check if the .png for an", "(with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") # For the orphaned check, we find docIDs that", "point to an existing item # - Check if all documents point to", "M \"302\", \"300\", \"301\", \"351\", \"353\", \"350\", \"352\", \"330\", \"332\", \"331\", # Trade", "in: \"+file) return data def composeCsv(data, target): lines = [] for item in", "for an item/doc exists checkFileLinks(itemData, header[\"itemImage\"]) checkFileLinks(docData, header[\"docImage\"]) print(\"\") input(\"All done. 
Press enter", "\"640\", \"641\", \"642\", \"643\", \"644\", \"645\", \"646\", \"647\", \"648\", \"649\", # Life Support", "# - Every ID between items and documents needs to be unique (the", "check.\") return itemIDs = [] for i in items: if i == delimiter+'header'+delimiter:", "itemIDs = [] for i in items: if i == delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier])", "): lines = readFile(os.path.join(source, file)) header = cleanLine(getHeader(lines), '\\t ', ';') identifierIndex =", "\"2052\", \"2053\", \"2054\", \"2055\", \"2056\", \"2057\", \"2058\", \"2059\", # Components - Class J", "Momo changes this in an update. # Unique sorting key of items (items_database.txt)", "Parse the items, stored as item[id] data = {} data[delimiter+'header'+delimiter] = header #", "needs to look for when examining data. header = {} # You probably", "\"2031\", \"2032\", \"2033\", \"2034\", \"2035\", \"2036\", \"2037\", \"2038\", \"2039\", # Components - Class", "across: \"+fn1+\" and \"+fn2+\".\") # Checks that the column header[itemId] has en entry", "need to change this, unless Momo changes this in an update. # Unique", "Gets the index of the identifier from the header[list] def getIdentifierIndex(header, identifier): if", "1 else: # No duplicate, add the line. 
data[id] = line if len(doubles)", "have a crafting document attached (with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") # For the orphaned", "have a document # - Check if all documents point to an existing", "- Class I \"2050\", \"2051\", \"2052\", \"2053\", \"2054\", \"2055\", \"2056\", \"2057\", \"2058\", \"2059\",", "\"621\", \"622\", \"623\", \"624\", \"625\", \"626\", \"627\", \"628\", \"629\", # Life Support -", "id in duplicates: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(duplicates[id])+\" times in \"+fn1+\"", "item's image header['itemImage'] = '6 icon image' # Name of the document's image", "\"641\", \"642\", \"643\", \"644\", \"645\", \"646\", \"647\", \"648\", \"649\", # Life Support -", "\"7\", \"8\", \"9\", \"10\", # Resources - Raw \"11\", \"20\", \"21\", \"22\", \"23\",", "item # - Check if all documents point to a unique item sanityCheck(itemData,", "\"320\", \"321\", \"323\", \"325\", \"311\", \"310\", \"312\", \"313\", \"403\", \"404\", # Trade Goods", "DOC ID' # Name of the item's image header['itemImage'] = '6 icon image'", "are missing in docIDs docSet = set(docIDs) ignoreSet = set(ignoreUncraftable) missingDocs = [x", "have a crafting document, but the item does not exist: \"+', '.join(missingItems)) print(\"------------------------------\")", "\"2050\", \"2051\", \"2052\", \"2053\", \"2054\", \"2055\", \"2056\", \"2057\", \"2058\", \"2059\", # Components -", "exports 2 files to a csv: # - MOD/items/items_database.txt -> itemdb.csv # -", "- Each file can only contain unique IDs (the exported csv will only", "doubles[id] = 2 if id not in doubles else doubles[id] + 1 else:", "print(\"✔️ There were no duplicate keys in: \"+file) return data def composeCsv(data, target):", "a csv: # - MOD/items/items_database.txt -> itemdb.csv # - MOD/items/specs_database.txt -> docdb.csv #", "find the IDs from itemIDS that are missing in docIDs docSet = set(docIDs)", "> 0: for id in doubles: print(\"❌ The unique identifier 
'\"+id+\"' matched \"+str(doubles[id])+\"", "do some sanity checking, which should be useful for mod & modpack creators:", "delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = [] for i in docs: if i ==", "were found across \"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate", "document: \"+', '.join(duplicates)) print(\"------------------------------\") else: print(\"✔️ All documents point to a unique item.\")", "commented line at the start of a file def getHeader(data): for idx, line", "for x in itemIDs if x not in docSet and x not in", "def composeCsv(data, target): lines = [] for item in data: # data is", "return header.index(identifier) def parseFile(file, identifier, ): lines = readFile(os.path.join(source, file)) header = cleanLine(getHeader(lines),", "i in items: if i == delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = [] for", "docIDs that are missing in itemIDs itemSet = set(itemIDs) missingItems = [x for", "\"109\", \"118\", \"113\", # Materials \"105\", \"106\", \"107\", \"108\", \"110\" , \"2000\", \"111\",", "Life Support - Water \"640\", \"641\", \"642\", \"643\", \"644\", \"645\", \"646\", \"647\", \"648\",", "if not haserror: print(\"✔️ All files in column '\"+header+\"' exist.\") if __name__ ==", "be unique (the script will warn) # - Warns when an item doesn't", "\"211\", \"212\", \"213\", \"214\", \"215\", \"216\", \"217\", \"218\", # Components - Class D", ".png for an item/doc exists checkFileLinks(itemData, header[\"itemImage\"]) checkFileLinks(docData, header[\"docImage\"]) print(\"\") input(\"All done. Press", "add the line. 
data[id] = line if len(doubles) > 0: for id in", "\"620\", \"621\", \"622\", \"623\", \"624\", \"625\", \"626\", \"627\", \"628\", \"629\", # Life Support", "\"2059\", # Components - Class J \"2080\", \"2081\", \"2082\", \"400\", \"401\", \"402\", #", "\"2005\", \"2006\", \"2007\", \"2008\", \"2009\", # Components - Class E \"2010\", \"2011\", \"2012\",", "line = line[0:-1] return [x.strip(strip) for x in line.split(delim)] # Finds the header,", "All documents point to a unique item.\") # We have 2 lists of", "= fh.readlines() fh.close() return data # Writes a list of data to path", "data = {} data[delimiter+'header'+delimiter] = header # store the header for future use", "set() duplicates = [x for x in docIDs if x in seen or", "# Materials \"121\", \"117\", \"116\", \"124\", \"119\", \"123\", \"120\", \"122\", # Materials \"150\",", "don't have a crafting document ignoreUncraftable = [ \"1\", \"2\", \"3\", \"4\", \"5\",", "links to '\"+file+\"', which doesn't exists.\") if not haserror: print(\"✔️ All files in", "- MOD/items/items_database.txt -> itemdb.csv # - MOD/items/specs_database.txt -> docdb.csv # It will also", "x in docIDs if x not in itemSet] if len(missingItems) > 0: print(\"❌", "if __name__ == \"__main__\": itemData = parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData = parseFile(docfile,", "print(\"🛑 couldn't locate '\"+docsHeader+\"' in findMissing(), unable to continue sanity check.\") return itemIDs", "creators: # - Each file can only contain unique IDs (the exported csv", "sure they're unique seen = set() duplicates = [x for x in docIDs", "checks: # - Check if all items have a document # - Check", "\"165\", \"166\", \"167\", \"169\", # Components - Class B \"170\", \"200\", \"201\", \"202\",", "delim): line = line.strip() if line == \"\": return line if line[-1] ==", "\"210\", \"211\", \"212\", \"213\", \"214\", \"215\", \"216\", \"217\", \"218\", # Components - Class", "- Loot \"29\", \"100\", \"101\", 
\"103\", \"114\", \"102\", \"104\", \"109\", \"118\", \"113\", #", "= getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+docsHeader+\"' in findMissing(),", "'9 CRAFTS ID' ### End of configuration ### ### Code starts here ###", "seen.add(x)] if len(duplicates) > 0: print(\"❌ The following item ID(s) have more than", "- Class F \"2020\", \"2021\", \"2022\", \"2023\", \"2024\", \"2025\", \"2026\", \"2027\", \"2028\", \"2029\",", "'6 icon image' # Name of the document's image header['docImage'] = '6 doc", "= [x for x in itemIDs if x not in docSet and x", "image' # Name of the document's image header['docImage'] = '6 doc image' #", "D \"219\", \"2001\", \"2002\", \"2003\", \"2004\", \"2005\", \"2006\", \"2007\", \"2008\", \"2009\", # Components", "\"164\", \"155\", \"156\", \"157\", \"158\", \"168\", # Components - Class A \"160\", \"161\",", "print(\"❌ Duplicate IDs were found across \"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\") else: print(\"✔️ There", "duplicate IDs findDuplicateEntries(itemfile, itemData, docfile, docData) # Sanity checks: # - Check if", "header.index(identifier) def parseFile(file, identifier, ): lines = readFile(os.path.join(source, file)) header = cleanLine(getHeader(lines), '\\t", "itemData, docfile, docData) # Sanity checks: # - Check if all items have", "\"108\", \"110\" , \"2000\", \"111\", \"115\", \"112\", # Materials \"121\", \"117\", \"116\", \"124\",", "\"154\", \"159\", \"165\", \"166\", \"167\", \"169\", # Components - Class B \"170\", \"200\",", "\"161\", \"162\", \"163\", \"154\", \"159\", \"165\", \"166\", \"167\", \"169\", # Components - Class", "header = cleanLine(getHeader(lines), '\\t ', ';') identifierIndex = getIdentifierIndex(header, identifier) if identifierIndex ==", "if x not in docSet and x not in ignoreSet] if len(missingDocs) >", "starts here ### import os # reads data from path def readFile(path): fh", "# Checks that 
the column header[itemId] has en entry in the column header[docItemId]", "# - Warns when an item doesn't have a doc for crafting #", "return [x.strip(strip) for x in line.split(delim)] # Finds the header, which is the", "stores the ID that are duplicates for line in lines: if line[:2] ==", "# You probably won't need to change this, unless Momo changes this in", "\"333\", \"341\", \"342\", \"340\", \"343\", \"303\", \"304\", \"305\", \"322\", \"324\", # Trade Goods", "to a unique item sanityCheck(itemData, header[\"itemId\"], docData, header[\"docItemId\"]) # Check if the .png", "[ \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", # Resources", "\"121\", \"117\", \"116\", \"124\", \"119\", \"123\", \"120\", \"122\", # Materials \"150\", \"151\", \"152\",", "documents # Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile =", "\"663\", \"664\", \"665\", \"666\", \"667\", \"668\", \"669\", # Life Support - Waste \"690\",", "\"+fn2+\".\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys across: \"+fn1+\" and \"+fn2+\".\")", "crafting document, but the item does not exist: \"+', '.join(missingItems)) print(\"------------------------------\") else: print(\"✔️", "\"313\", \"403\", \"404\", # Trade Goods \"405\", \"406\", \"407\", \"408\", # Trade Goods", "items and documents needs to be unique (the script will warn) # -", "duplicates[id] + 1 if len(duplicates) > 0: for id in duplicates: print(\"❌ The", "header = {} # You probably won't need to change this, unless Momo", "will warn) # - Warns when an item doesn't have a doc for", "Imperium_Data/MOD/items/' itemfile = 'items_database.txt' docfile = 'specs_database.txt' # Delimiter to use in the", "\"112\", # Materials \"121\", \"117\", \"116\", \"124\", \"119\", \"123\", \"120\", \"122\", # Materials", "and \"+fn2+\".\") print(\"❌ Duplicate IDs were found 
across \"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\") else:", "# This script exports 2 files to a csv: # - MOD/items/items_database.txt ->", "{} # You probably won't need to change this, unless Momo changes this", "\"2002\", \"2003\", \"2004\", \"2005\", \"2006\", \"2007\", \"2008\", \"2009\", # Components - Class E", "## These settings tell the script which title it needs to look for", "os # reads data from path def readFile(path): fh = open(path, 'r', encoding='utf8',", "for id in doubles: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(doubles[id])+\" different lines.\")", "docSet = set(docIDs) ignoreSet = set(ignoreUncraftable) missingDocs = [x for x in itemIDs", "print(\"✔️ All files in column '\"+header+\"' exist.\") if __name__ == \"__main__\": itemData =", "of items (items_database.txt) header['itemId'] = '1 ITEM ID' # Unique sorting key of", "0: for id in doubles: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(doubles[id])+\" different", "duplicates = {} for id in data1.keys() & data2.keys(): if id == delimiter+'header'+delimiter:", "item/doc exists checkFileLinks(itemData, header[\"itemImage\"]) checkFileLinks(docData, header[\"docImage\"]) print(\"\") input(\"All done. 
Press enter to exit.\")", "- Class D \"219\", \"2001\", \"2002\", \"2003\", \"2004\", \"2005\", \"2006\", \"2007\", \"2008\", \"2009\",", "for x in docIDs if x not in itemSet] if len(missingItems) > 0:", "as the (non)licence can be found at: https://github.com/Katorone/Astrox-Imperium # This script exports 2", "= '6 icon image' # Name of the document's image header['docImage'] = '6", "You probably won't need to change this, unless Momo changes this in an", "\"2034\", \"2035\", \"2036\", \"2037\", \"2038\", \"2039\", # Components - Class H \"2040\", \"2041\",", "docIDs = [] for i in docs: if i == delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier])", "= [x for x in docIDs if x not in itemSet] if len(missingItems)", "if len(missingItems) > 0: print(\"❌ The following item ID(s) have a crafting document,", "contain unique IDs (the exported csv will only contain the first match) #", "docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go over all items in docIDs and make sure they're", "to continue sanity check.\") return haserror = False for i in data: if", "warn) # - Warns when an item doesn't have a doc for crafting", "\"304\", \"305\", \"322\", \"324\", # Trade Goods \"320\", \"321\", \"323\", \"325\", \"311\", \"310\",", "column header[docItemId] def sanityCheck(items, itemHeader, docs, docsHeader): itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier", "# It will also do some sanity checking, which should be useful for", "\"645\", \"646\", \"647\", \"648\", \"649\", # Life Support - Thermal \"660\", \"661\", \"662\",", "\"341\", \"342\", \"340\", \"343\", \"303\", \"304\", \"305\", \"322\", \"324\", # Trade Goods \"320\",", "All files in column '\"+header+\"' exist.\") if __name__ == \"__main__\": itemData = parseFile(itemfile,", "the line. 
data[id] = line if len(doubles) > 0: for id in doubles:", "Class B \"170\", \"200\", \"201\", \"202\", \"203\", \"204\", \"205\", \"206\", \"207\", \"208\", #", "> 0: for id in duplicates: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(duplicates[id])+\"", "\"402\", # Components - Class M \"302\", \"300\", \"301\", \"351\", \"353\", \"350\", \"352\",", "# Components - Class M \"302\", \"300\", \"301\", \"351\", \"353\", \"350\", \"352\", \"330\",", "the .png for an item/doc exists checkFileLinks(itemData, header[\"itemImage\"]) checkFileLinks(docData, header[\"docImage\"]) print(\"\") input(\"All done.", "print(\"------------------------------\") else: print(\"✔️ All items have a crafting document attached (with \"+str(len(ignoreUncraftable))+\" ignored", "of item IDs that don't have a crafting document ignoreUncraftable = [ \"1\",", "delimiter+'header'+delimiter: continue file = data[i][headerIdentifier] if not os.path.isfile(os.path.join(source, file)): haserror = True print(\"❌", "\"701\", \"680\", \"681\", \"710\", \"711\", # Consumables \"712\", \"702\", \"703\", \"735\", \"736\", \"737\",", "parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData = parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv') # Check itemData", "\"626\", \"627\", \"628\", \"629\", # Life Support - Water \"640\", \"641\", \"642\", \"643\",", "fh.readlines() fh.close() return data # Writes a list of data to path def", "# Duplicate checking doubles[id] = 2 if id not in doubles else doubles[id]", "\"10\", # Resources - Raw \"11\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\",", "are uncraftable by design can be added to the 'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\")", "# Materials \"150\", \"151\", \"152\", \"153\", \"164\", \"155\", \"156\", \"157\", \"158\", \"168\", #", "checkFileLinks(), unable to continue sanity check.\") return haserror = False for i in", 
"of the item's image header['itemImage'] = '6 icon image' # Name of the", "len(duplicates) > 0: for id in duplicates: print(\"❌ The unique identifier '\"+id+\"' matched", "id = line[identifierIndex] if id in data: # Duplicate checking doubles[id] = 2", "getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier == -1: print(\"🛑 couldn't locate '\"+header+\"' in checkFileLinks(), unable", "matched \"+str(duplicates[id])+\" times in \"+fn1+\" and \"+fn2+\".\") print(\"❌ Duplicate IDs were found across", "in column '\"+header+\"' exist.\") if __name__ == \"__main__\": itemData = parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData,", "= '6 doc image' # The item ID that a doc would craft:", "and \"+fn2+\".\") # Checks that the column header[itemId] has en entry in the", "update. # Unique sorting key of items (items_database.txt) header['itemId'] = '1 ITEM ID'", "and docData for duplicate IDs findDuplicateEntries(itemfile, itemData, docfile, docData) # Sanity checks: #", "lines.\") print(\"❌ Duplicates were found. 
The script will only use the first match", "locate '\"+docsHeader+\"' in findMissing(), unable to continue sanity check.\") return itemIDs = []", "haserror = False for i in data: if i == delimiter+'header'+delimiter: continue file", "column header[itemId] has en entry in the column header[docItemId] def sanityCheck(items, itemHeader, docs,", "delimiter+'header'+delimiter: continue duplicates[id] = 2 if id not in duplicates else duplicates[id] +", "the orphaned check, we find docIDs that are missing in itemIDs itemSet =", "[x for x in docIDs if x not in itemSet] if len(missingItems) >", "the header for future use doubles = {} # stores the ID that", "len(missingDocs) > 0: print(\"❌ The following item ID(s) do not have a crafting", "for future use doubles = {} # stores the ID that are duplicates", "unable to continue sanity check.\") return itemIDs = [] for i in items:", "and docData for duplicate IDs def findDuplicateEntries(fn1, data1, fn2, data2): duplicates = {}", "identifierIndex == -1: print(\"🛑 couldn't locate '\"+identifier+\"' in '\"+source+\"'\") quit() # Parse the", "\"166\", \"167\", \"169\", # Components - Class B \"170\", \"200\", \"201\", \"202\", \"203\",", "print(\"❌ Duplicates were found. 
The script will only use the first match per", "for an item/doc exists # - Orphaned documents # Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\", "\"100\", \"101\", \"103\", \"114\", \"102\", \"104\", \"109\", \"118\", \"113\", # Materials \"105\", \"106\",", "line == \"\": continue # Ignore empty lines id = line[identifierIndex] if id", "print(\"❌ Item id '\"+i+\"' links to '\"+file+\"', which doesn't exists.\") if not haserror:", "\"2058\", \"2059\", # Components - Class J \"2080\", \"2081\", \"2082\", \"400\", \"401\", \"402\",", "newline='') for line in dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished writing: \"+path) # Takes", "\"693\", # Consumables \"676\", \"677\", \"700\", \"678\", \"679\", \"701\", \"680\", \"681\", \"710\", \"711\",", "print(\"❌ The unique identifier '\"+id+\"' matched \"+str(duplicates[id])+\" times in \"+fn1+\" and \"+fn2+\".\") print(\"❌", "for crafting # - Check if the .png for an item/doc exists #", "items: if i == delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = [] for i in", "for mod & modpack creators: # - Each file can only contain unique", "= parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData = parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv') # Check", "if x in seen or seen.add(x)] if len(duplicates) > 0: print(\"❌ The following", "of a file def getHeader(data): for idx, line in enumerate(data): if line[:2] !=", ", \"2000\", \"111\", \"115\", \"112\", # Materials \"121\", \"117\", \"116\", \"124\", \"119\", \"123\",", "a crafting document attached (with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") # For the orphaned check,", "'.join(missingItems)) print(\"------------------------------\") else: print(\"✔️ All documents have an existing item attached.\") def checkFileLinks(data,", "There were no duplicate keys across: \"+fn1+\" and \"+fn2+\".\") # Checks that the", "matched 
\"+str(doubles[id])+\" different lines.\") print(\"❌ Duplicates were found. The script will only use", "print(\"🛑 couldn't locate '\"+header+\"' in checkFileLinks(), unable to continue sanity check.\") return haserror", "x in itemIDs if x not in docSet and x not in ignoreSet]", "J \"2080\", \"2081\", \"2082\", \"400\", \"401\", \"402\", # Components - Class M \"302\",", "\"2055\", \"2056\", \"2057\", \"2058\", \"2059\", # Components - Class J \"2080\", \"2081\", \"2082\",", "title it needs to look for when examining data. header = {} #", "the header[list] def getIdentifierIndex(header, identifier): if identifier not in header: return -1 return", "path def readFile(path): fh = open(path, 'r', encoding='utf8', newline='\\n') data = fh.readlines() fh.close()", "\"152\", \"153\", \"164\", \"155\", \"156\", \"157\", \"158\", \"168\", # Components - Class A", "\"115\", \"112\", # Materials \"121\", \"117\", \"116\", \"124\", \"119\", \"123\", \"120\", \"122\", #", "haserror = True print(\"❌ Item id '\"+i+\"' links to '\"+file+\"', which doesn't exists.\")", "\"351\", \"353\", \"350\", \"352\", \"330\", \"332\", \"331\", # Trade Goods \"333\", \"341\", \"342\",", "# stores the ID that are duplicates for line in lines: if line[:2]", "\"28\", # Resources - Loot \"29\", \"100\", \"101\", \"103\", \"114\", \"102\", \"104\", \"109\",", "return -1 return header.index(identifier) def parseFile(file, identifier, ): lines = readFile(os.path.join(source, file)) header", "(specs_database.txt) header['docId'] = '1 DOC ID' # Name of the item's image header['itemImage']", "Class H \"2040\", \"2041\", \"2042\", \"2043\", \"2044\", \"2045\", \"2046\", \"2047\", \"2048\", \"2049\", #", "= 'items_database.txt' docfile = 'specs_database.txt' # Delimiter to use in the exported csv", "if identifier not in header: return -1 return header.index(identifier) def parseFile(file, identifier, ):", "print(\"✔️ There were no duplicate keys across: \"+fn1+\" and \"+fn2+\".\") # Checks that", 
"\"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") # For the orphaned check, we find docIDs that are", "\"2016\", \"2017\", \"2018\", \"2019\", # Components - Class F \"2020\", \"2021\", \"2022\", \"2023\",", "documents (specs_database.txt) header['docId'] = '1 DOC ID' # Name of the item's image", "encoding='utf8', newline='') for line in dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished writing: \"+path) #", "script which title it needs to look for when examining data. header =", "\"168\", # Components - Class A \"160\", \"161\", \"162\", \"163\", \"154\", \"159\", \"165\",", "ID(s) have a crafting document, but the item does not exist: \"+', '.join(missingItems))", "# Resources - Raw \"11\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \"27\",", "documents needs to be unique (the script will warn) # - Warns when", "\"629\", # Life Support - Water \"640\", \"641\", \"642\", \"643\", \"644\", \"645\", \"646\",", "### End of configuration ### ### Code starts here ### import os #", "docs, docsHeader): itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier == -1: print(\"🛑 couldn't locate", "duplicate IDs def findDuplicateEntries(fn1, data1, fn2, data2): duplicates = {} for id in", "\"322\", \"324\", # Trade Goods \"320\", \"321\", \"323\", \"325\", \"311\", \"310\", \"312\", \"313\",", "- Water \"640\", \"641\", \"642\", \"643\", \"644\", \"645\", \"646\", \"647\", \"648\", \"649\", #", "{} for id in data1.keys() & data2.keys(): if id == delimiter+'header'+delimiter: continue duplicates[id]", "file can only contain unique IDs (the exported csv will only contain the", "'\"+header+\"' exist.\") if __name__ == \"__main__\": itemData = parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData", "parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv') # Check itemData and docData for duplicate IDs 
findDuplicateEntries(itemfile,", "\"2029\", # Components - Class G \"2030\", \"2031\", \"2032\", \"2033\", \"2034\", \"2035\", \"2036\",", "\"+str(duplicates[id])+\" times in \"+fn1+\" and \"+fn2+\".\") print(\"❌ Duplicate IDs were found across \"+fn1+\"", "len(missingItems) > 0: print(\"❌ The following item ID(s) have a crafting document, but", "\"2053\", \"2054\", \"2055\", \"2056\", \"2057\", \"2058\", \"2059\", # Components - Class J \"2080\",", "= {} # You probably won't need to change this, unless Momo changes", "in data: # Duplicate checking doubles[id] = 2 if id not in doubles", "item does not exist: \"+', '.join(missingItems)) print(\"------------------------------\") else: print(\"✔️ All documents have an", "All items have a crafting document attached (with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") # For", "configuration ### ### Code starts here ### import os # reads data from", "\"680\", \"681\", \"710\", \"711\", # Consumables \"712\", \"702\", \"703\", \"735\", \"736\", \"737\", \"738\",", "exist.\") if __name__ == \"__main__\": itemData = parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData =", "in duplicates else duplicates[id] + 1 if len(duplicates) > 0: for id in", "the .png for an item/doc exists # - Orphaned documents # Example for", "is guarantueed to be ordered by insertion. 
joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines)", "\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", # Resources - Raw", "\"673\", \"692\", \"674\", \"675\", \"693\", # Consumables \"676\", \"677\", \"700\", \"678\", \"679\", \"701\",", "\"160\", \"161\", \"162\", \"163\", \"154\", \"159\", \"165\", \"166\", \"167\", \"169\", # Components -", "list def cleanLine(line, strip, delim): line = line.strip() if line == \"\": return", "sorting key of documents (specs_database.txt) header['docId'] = '1 DOC ID' # Name of", "return data # Writes a list of data to path def writeFile(path, dataList):", "\"2039\", # Components - Class H \"2040\", \"2041\", \"2042\", \"2043\", \"2044\", \"2045\", \"2046\",", "-1 return header.index(identifier) def parseFile(file, identifier, ): lines = readFile(os.path.join(source, file)) header =", "point to a unique item.\") # We have 2 lists of IDs, find", "if all documents point to a unique item sanityCheck(itemData, header[\"itemId\"], docData, header[\"docItemId\"]) #", "- Warns when an item doesn't have a doc for crafting # -", "itemSet] if len(missingItems) > 0: print(\"❌ The following item ID(s) have a crafting", "Thermal \"660\", \"661\", \"662\", \"663\", \"664\", \"665\", \"666\", \"667\", \"668\", \"669\", # Life", "\"122\", # Materials \"150\", \"151\", \"152\", \"153\", \"164\", \"155\", \"156\", \"157\", \"158\", \"168\",", "docIDs and make sure they're unique seen = set() duplicates = [x for", "\"340\", \"343\", \"303\", \"304\", \"305\", \"322\", \"324\", # Trade Goods \"320\", \"321\", \"323\",", "'\"+itemHeader+\"' in findMissing(), unable to continue sanity check.\") return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader)", "(items_database.txt) header['itemId'] = '1 ITEM ID' # Unique sorting key of documents (specs_database.txt)", "not in docSet and x not in ignoreSet] if len(missingDocs) > 0: print(\"❌", "Food 
\"620\", \"621\", \"622\", \"623\", \"624\", \"625\", \"626\", \"627\", \"628\", \"629\", # Life", "is the last commented line at the start of a file def getHeader(data):", "'\\t ', ';') if line == \"\": continue # Ignore empty lines id", "cleanLine(getHeader(lines), '\\t ', ';') identifierIndex = getIdentifierIndex(header, identifier) if identifierIndex == -1: print(\"🛑", "enumerate(data): if line[:2] != '//': return data[idx-1][2:] # Gets the index of the", "point to a unique item sanityCheck(itemData, header[\"itemId\"], docData, header[\"docItemId\"]) # Check if the", "E \"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"2018\", \"2019\", # Components", "= {} for id in data1.keys() & data2.keys(): if id == delimiter+'header'+delimiter: continue", "else: print(\"✔️ There were no duplicate keys in: \"+file) return data def composeCsv(data,", "in the exported csv delimiter = ';' # List of item IDs that", "\"665\", \"666\", \"667\", \"668\", \"669\", # Life Support - Waste \"690\", \"670\", \"671\",", "\"342\", \"340\", \"343\", \"303\", \"304\", \"305\", \"322\", \"324\", # Trade Goods \"320\", \"321\",", "docData for duplicate IDs findDuplicateEntries(itemfile, itemData, docfile, docData) # Sanity checks: # -", "\"201\", \"202\", \"203\", \"204\", \"205\", \"206\", \"207\", \"208\", # Components - Class C", "ignoreSet] if len(missingDocs) > 0: print(\"❌ The following item ID(s) do not have", "\"343\", \"303\", \"304\", \"305\", \"322\", \"324\", # Trade Goods \"320\", \"321\", \"323\", \"325\",", "'items_database.txt' docfile = 'specs_database.txt' # Delimiter to use in the exported csv delimiter", "across \"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys across:", "def writeFile(path, dataList): fh = open(path, 'w', encoding='utf8', newline='') for line in dataList:", "data[id] = line if len(doubles) > 0: for id in doubles: print(\"❌ The", "data to path def 
writeFile(path, dataList): fh = open(path, 'w', encoding='utf8', newline='') for", "if the .png for an item/doc exists checkFileLinks(itemData, header[\"itemImage\"]) checkFileLinks(docData, header[\"docImage\"]) print(\"\") input(\"All", "\"649\", # Life Support - Thermal \"660\", \"661\", \"662\", \"663\", \"664\", \"665\", \"666\",", "# For the orphaned check, we find docIDs that are missing in itemIDs", "docsHeader): itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+itemHeader+\"'", "item IDs that don't have a crafting document ignoreUncraftable = [ \"1\", \"2\",", "line in dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished writing: \"+path) # Takes a string", "uncraftables).\") # For the orphaned check, we find docIDs that are missing in", "x not in docSet and x not in ignoreSet] if len(missingDocs) > 0:", "IDs from itemIDS that are missing in docIDs docSet = set(docIDs) ignoreSet =", "item doesn't have a doc for crafting # - Check if the .png", "continue sanity check.\") return haserror = False for i in data: if i", "\"325\", \"311\", \"310\", \"312\", \"313\", \"403\", \"404\", # Trade Goods \"405\", \"406\", \"407\",", "data: # Duplicate checking doubles[id] = 2 if id not in doubles else", "that the column header[itemId] has en entry in the column header[docItemId] def sanityCheck(items,", "exported csv delimiter = ';' # List of item IDs that don't have", "\"2043\", \"2044\", \"2045\", \"2046\", \"2047\", \"2048\", \"2049\", # Components - Class I \"2050\",", "Unique sorting key of documents (specs_database.txt) header['docId'] = '1 DOC ID' # Name", "if all documents point to an existing item # - Check if all", "Ignore empty lines id = line[identifierIndex] if id in data: # Duplicate checking", "'r', encoding='utf8', newline='\\n') data = fh.readlines() fh.close() return data # Writes a list", "and \"+fn2+\".\") 
print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys across: \"+fn1+\" and", "item ID(s) have more than one crafting document: \"+', '.join(duplicates)) print(\"------------------------------\") else: print(\"✔️", "in data1.keys() & data2.keys(): if id == delimiter+'header'+delimiter: continue duplicates[id] = 2 if", "= ';' # List of item IDs that don't have a crafting document", "'items_database.csv') docData = parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv') # Check itemData and docData for", "Class I \"2050\", \"2051\", \"2052\", \"2053\", \"2054\", \"2055\", \"2056\", \"2057\", \"2058\", \"2059\", #", "- Class B \"170\", \"200\", \"201\", \"202\", \"203\", \"204\", \"205\", \"206\", \"207\", \"208\",", "\"648\", \"649\", # Life Support - Thermal \"660\", \"661\", \"662\", \"663\", \"664\", \"665\",", "item/doc exists # - Orphaned documents # Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source =", "'/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile = 'items_database.txt' docfile = 'specs_database.txt' # Delimiter to use", "\"207\", \"208\", # Components - Class C \"209\", \"210\", \"211\", \"212\", \"213\", \"214\",", "checking doubles[id] = 2 if id not in doubles else doubles[id] + 1", "\"2003\", \"2004\", \"2005\", \"2006\", \"2007\", \"2008\", \"2009\", # Components - Class E \"2010\",", "# Trade Goods \"405\", \"406\", \"407\", \"408\", # Trade Goods \"600\", \"601\", \"602\",", "== \"\": return line if line[-1] == delim: line = line[0:-1] return [x.strip(strip)", "\"2038\", \"2039\", # Components - Class H \"2040\", \"2041\", \"2042\", \"2043\", \"2044\", \"2045\",", "return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+docsHeader+\"'", "- Class G \"2030\", \"2031\", \"2032\", \"2033\", \"2034\", \"2035\", \"2036\", \"2037\", 
\"2038\", \"2039\",", "# Delimiter to use in the exported csv delimiter = ';' # List", "check, we find docIDs that are missing in itemIDs itemSet = set(itemIDs) missingItems", "\"2014\", \"2015\", \"2016\", \"2017\", \"2018\", \"2019\", # Components - Class F \"2020\", \"2021\",", "\"301\", \"351\", \"353\", \"350\", \"352\", \"330\", \"332\", \"331\", # Trade Goods \"333\", \"341\",", "\"163\", \"154\", \"159\", \"165\", \"166\", \"167\", \"169\", # Components - Class B \"170\",", "\"647\", \"648\", \"649\", # Life Support - Thermal \"660\", \"661\", \"662\", \"663\", \"664\",", "exists.\") if not haserror: print(\"✔️ All files in column '\"+header+\"' exist.\") if __name__", "items have a document # - Check if all documents point to an", "# - Check if all documents point to a unique item sanityCheck(itemData, header[\"itemId\"],", "- Class M \"302\", \"300\", \"301\", \"351\", \"353\", \"350\", \"352\", \"330\", \"332\", \"331\",", "composeCsv(itemData, 'items_database.csv') docData = parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv') # Check itemData and docData", "found at: https://github.com/Katorone/Astrox-Imperium # This script exports 2 files to a csv: #", "\"401\", \"402\", # Components - Class M \"302\", \"300\", \"301\", \"351\", \"353\", \"350\",", "in lines: if line[:2] == '//': continue # Ignore comments line = cleanLine(line,", "\"305\", \"322\", \"324\", # Trade Goods \"320\", \"321\", \"323\", \"325\", \"311\", \"310\", \"312\",", "over all items in docIDs and make sure they're unique seen = set()", "line. 
data[id] = line if len(doubles) > 0: for id in doubles: print(\"❌", "Class A \"160\", \"161\", \"162\", \"163\", \"154\", \"159\", \"165\", \"166\", \"167\", \"169\", #", "go over all items in docIDs and make sure they're unique seen =", "\"642\", \"643\", \"644\", \"645\", \"646\", \"647\", \"648\", \"649\", # Life Support - Thermal", "There were no duplicate keys in: \"+file) return data def composeCsv(data, target): lines", "unable to continue sanity check.\") return haserror = False for i in data:", "header[\"docId\"]) composeCsv(docData, 'specs_database.csv') # Check itemData and docData for duplicate IDs findDuplicateEntries(itemfile, itemData,", "open(path, 'w', encoding='utf8', newline='') for line in dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished writing:", "docData, header[\"docItemId\"]) # Check if the .png for an item/doc exists checkFileLinks(itemData, header[\"itemImage\"])", "the item does not exist: \"+', '.join(missingItems)) print(\"------------------------------\") else: print(\"✔️ All documents have", "document's image header['docImage'] = '6 doc image' # The item ID that a", "= data[i][headerIdentifier] if not os.path.isfile(os.path.join(source, file)): haserror = True print(\"❌ Item id '\"+i+\"'", "- Every ID between items and documents needs to be unique (the script", "For the orphaned check, we find docIDs that are missing in itemIDs itemSet", "len(doubles) > 0: for id in doubles: print(\"❌ The unique identifier '\"+id+\"' matched", "Components - Class J \"2080\", \"2081\", \"2082\", \"400\", \"401\", \"402\", # Components -", "Each file can only contain unique IDs (the exported csv will only contain", "header[list] def getIdentifierIndex(header, identifier): if identifier not in header: return -1 return header.index(identifier)", "# - Each file can only contain unique IDs (the exported csv will", "will also do some sanity checking, which should be useful for mod &", "\"6\", \"7\", \"8\", \"9\", \"10\", # Resources - Raw 
\"11\", \"20\", \"21\", \"22\",", "in '\"+source+\"'\") quit() # Parse the items, stored as item[id] data = {}", "getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+itemHeader+\"' in findMissing(), unable", "== -1: print(\"🛑 couldn't locate '\"+itemHeader+\"' in findMissing(), unable to continue sanity check.\")", "headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier == -1: print(\"🛑 couldn't locate '\"+header+\"' in", "of IDs, find the IDs from itemIDS that are missing in docIDs docSet", "crafting document: \"+', '.join(duplicates)) print(\"------------------------------\") else: print(\"✔️ All documents point to a unique", "\"312\", \"313\", \"403\", \"404\", # Trade Goods \"405\", \"406\", \"407\", \"408\", # Trade", "== delim: line = line[0:-1] return [x.strip(strip) for x in line.split(delim)] # Finds", "itemdb.csv # - MOD/items/specs_database.txt -> docdb.csv # It will also do some sanity", "last commented line at the start of a file def getHeader(data): for idx,", "headerIdentifier == -1: print(\"🛑 couldn't locate '\"+header+\"' in checkFileLinks(), unable to continue sanity", "Class D \"219\", \"2001\", \"2002\", \"2003\", \"2004\", \"2005\", \"2006\", \"2007\", \"2008\", \"2009\", #", "# The item ID that a doc would craft: header['docItemId'] = '9 CRAFTS", "\"213\", \"214\", \"215\", \"216\", \"217\", \"218\", # Components - Class D \"219\", \"2001\",", "Takes a string and returns a list def cleanLine(line, strip, delim): line =", "\"300\", \"301\", \"351\", \"353\", \"350\", \"352\", \"330\", \"332\", \"331\", # Trade Goods \"333\",", "couldn't locate '\"+itemHeader+\"' in findMissing(), unable to continue sanity check.\") return docsHeaderIdentifier =", "docdb.csv # It will also do some sanity checking, which should be useful", "file def getHeader(data): for idx, line in enumerate(data): if line[:2] != '//': return", "following 
item ID(s) do not have a crafting document: \"+', '.join(missingDocs)) print(\" Items", "# Check itemData and docData for duplicate IDs findDuplicateEntries(itemfile, itemData, docfile, docData) #", "print(\"❌ The following item ID(s) have a crafting document, but the item does", "\"2081\", \"2082\", \"400\", \"401\", \"402\", # Components - Class M \"302\", \"300\", \"301\",", "-> itemdb.csv # - MOD/items/specs_database.txt -> docdb.csv # It will also do some", "in findMissing(), unable to continue sanity check.\") return itemIDs = [] for i", "print(\"------------------------------\") else: print(\"✔️ All documents point to a unique item.\") # We have", "if line[:2] != '//': return data[idx-1][2:] # Gets the index of the identifier", "else: print(\"✔️ All documents point to a unique item.\") # We have 2", "\"624\", \"625\", \"626\", \"627\", \"628\", \"629\", # Life Support - Water \"640\", \"641\",", "Support - Thermal \"660\", \"661\", \"662\", \"663\", \"664\", \"665\", \"666\", \"667\", \"668\", \"669\",", "for line in lines: if line[:2] == '//': continue # Ignore comments line", "line[0:-1] return [x.strip(strip) for x in line.split(delim)] # Finds the header, which is", "1 if len(duplicates) > 0: for id in duplicates: print(\"❌ The unique identifier", "\"218\", # Components - Class D \"219\", \"2001\", \"2002\", \"2003\", \"2004\", \"2005\", \"2006\",", "Consumables \"676\", \"677\", \"700\", \"678\", \"679\", \"701\", \"680\", \"681\", \"710\", \"711\", # Consumables", "\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", # Resources -", "itemSet = set(itemIDs) missingItems = [x for x in docIDs if x not", "sanityCheck(items, itemHeader, docs, docsHeader): itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier == -1: print(\"🛑", "in items: if i == delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = [] for i", "Components - Class H 
\"2040\", \"2041\", \"2042\", \"2043\", \"2044\", \"2045\", \"2046\", \"2047\", \"2048\",", "fn2, data2): duplicates = {} for id in data1.keys() & data2.keys(): if id", "guarantueed to be ordered by insertion. joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) #", "different lines.\") print(\"❌ Duplicates were found. The script will only use the first", "documents point to an existing item # - Check if all documents point", "docData) # Sanity checks: # - Check if all items have a document", "\"2023\", \"2024\", \"2025\", \"2026\", \"2027\", \"2028\", \"2029\", # Components - Class G \"2030\",", "only contain unique IDs (the exported csv will only contain the first match)", "header['docId'] = '1 DOC ID' # Name of the item's image header['itemImage'] =", "that a doc would craft: header['docItemId'] = '9 CRAFTS ID' ### End of", "fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished writing: \"+path) # Takes a string and returns a", "to look for when examining data. 
header = {} # You probably won't", "A \"160\", \"161\", \"162\", \"163\", \"154\", \"159\", \"165\", \"166\", \"167\", \"169\", # Components", "# Check itemData and docData for duplicate IDs def findDuplicateEntries(fn1, data1, fn2, data2):", "items (items_database.txt) header['itemId'] = '1 ITEM ID' # Unique sorting key of documents", "# Materials \"105\", \"106\", \"107\", \"108\", \"110\" , \"2000\", \"111\", \"115\", \"112\", #", "getHeader(data): for idx, line in enumerate(data): if line[:2] != '//': return data[idx-1][2:] #", "### Code starts here ### import os # reads data from path def", "List of item IDs that don't have a crafting document ignoreUncraftable = [", "id == delimiter+'header'+delimiter: continue duplicates[id] = 2 if id not in duplicates else", "duplicates[id] = 2 if id not in duplicates else duplicates[id] + 1 if", "'\\t ', ';') identifierIndex = getIdentifierIndex(header, identifier) if identifierIndex == -1: print(\"🛑 couldn't", "Components - Class M \"302\", \"300\", \"301\", \"351\", \"353\", \"350\", \"352\", \"330\", \"332\",", "to be unique (the script will warn) # - Warns when an item", "locate '\"+itemHeader+\"' in findMissing(), unable to continue sanity check.\") return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter],", "quit() # Parse the items, stored as item[id] data = {} data[delimiter+'header'+delimiter] =", "does not exist: \"+', '.join(missingItems)) print(\"------------------------------\") else: print(\"✔️ All documents have an existing", "item[id] data = {} data[delimiter+'header'+delimiter] = header # store the header for future", "examining data. 
header = {} # You probably won't need to change this,", "fh = open(path, 'r', encoding='utf8', newline='\\n') data = fh.readlines() fh.close() return data #", "modpack creators: # - Each file can only contain unique IDs (the exported", "a list def cleanLine(line, strip, delim): line = line.strip() if line == \"\":", "> 0: print(\"❌ The following item ID(s) have a crafting document, but the", "The unique identifier '\"+id+\"' matched \"+str(doubles[id])+\" different lines.\") print(\"❌ Duplicates were found. The", "for idx, line in enumerate(data): if line[:2] != '//': return data[idx-1][2:] # Gets", "\"217\", \"218\", # Components - Class D \"219\", \"2001\", \"2002\", \"2003\", \"2004\", \"2005\",", "itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+itemHeader+\"' in", "\"670\", \"671\", \"691\", \"672\", \"673\", \"692\", \"674\", \"675\", \"693\", # Consumables \"676\", \"677\",", "'//': continue # Ignore comments line = cleanLine(line, '\\t ', ';') if line", "Check if all documents point to a unique item sanityCheck(itemData, header[\"itemId\"], docData, header[\"docItemId\"])", "found. The script will only use the first match per duplicate.\") print(\"------------------------------\") else:", "the items, stored as item[id] data = {} data[delimiter+'header'+delimiter] = header # store", "here ### import os # reads data from path def readFile(path): fh =", "Consumables \"712\", \"702\", \"703\", \"735\", \"736\", \"737\", \"738\", # Consumables ] ## These", "they're unique seen = set() duplicates = [x for x in docIDs if", "Duplicate IDs were found across \"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\") else: print(\"✔️ There were", "in itemIDs if x not in docSet and x not in ignoreSet] if", "an item/doc exists checkFileLinks(itemData, header[\"itemImage\"]) checkFileLinks(docData, header[\"docImage\"]) print(\"\") input(\"All done. 
Press enter to", "IDs (the exported csv will only contain the first match) # - Every", "\"2048\", \"2049\", # Components - Class I \"2050\", \"2051\", \"2052\", \"2053\", \"2054\", \"2055\",", "\"628\", \"629\", # Life Support - Water \"640\", \"641\", \"642\", \"643\", \"644\", \"645\",", "sorting key of items (items_database.txt) header['itemId'] = '1 ITEM ID' # Unique sorting", "Trade Goods \"405\", \"406\", \"407\", \"408\", # Trade Goods \"600\", \"601\", \"602\", \"603\",", "and returns a list def cleanLine(line, strip, delim): line = line.strip() if line", "\"310\", \"312\", \"313\", \"403\", \"404\", # Trade Goods \"405\", \"406\", \"407\", \"408\", #", "line[-1] == delim: line = line[0:-1] return [x.strip(strip) for x in line.split(delim)] #", "unless Momo changes this in an update. # Unique sorting key of items", "findDuplicateEntries(itemfile, itemData, docfile, docData) # Sanity checks: # - Check if all items", "dataList): fh = open(path, 'w', encoding='utf8', newline='') for line in dataList: fh.write(line+'\\r\\n') fh.close()", "ID' ### End of configuration ### ### Code starts here ### import os", "or seen.add(x)] if len(duplicates) > 0: print(\"❌ The following item ID(s) have more", "exists # - Orphaned documents # Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox", "data # Writes a list of data to path def writeFile(path, dataList): fh", "# - MOD/items/items_database.txt -> itemdb.csv # - MOD/items/specs_database.txt -> docdb.csv # It will", "-1: print(\"🛑 couldn't locate '\"+itemHeader+\"' in findMissing(), unable to continue sanity check.\") return", "else doubles[id] + 1 else: # No duplicate, add the line. 
data[id] =", "missing in docIDs docSet = set(docIDs) ignoreSet = set(ignoreUncraftable) missingDocs = [x for", "\"27\", \"28\", # Resources - Loot \"29\", \"100\", \"101\", \"103\", \"114\", \"102\", \"104\",", "Goods \"333\", \"341\", \"342\", \"340\", \"343\", \"303\", \"304\", \"305\", \"322\", \"324\", # Trade", "look for when examining data. header = {} # You probably won't need", "- Check if all documents point to a unique item sanityCheck(itemData, header[\"itemId\"], docData,", "\"2006\", \"2007\", \"2008\", \"2009\", # Components - Class E \"2010\", \"2011\", \"2012\", \"2013\",", "Duplicate checking doubles[id] = 2 if id not in doubles else doubles[id] +", "as item[id] data = {} data[delimiter+'header'+delimiter] = header # store the header for", "exported csv will only contain the first match) # - Every ID between", "added to the 'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️ All items have a", "\"600\", \"601\", \"602\", \"603\", \"604\", \"605\", \"606\", \"607\", \"608\", \"609\", # Life Support", "Name of the document's image header['docImage'] = '6 doc image' # The item", "data def composeCsv(data, target): lines = [] for item in data: # data", "duplicate keys across: \"+fn1+\" and \"+fn2+\".\") # Checks that the column header[itemId] has", "first match per duplicate.\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys in:", "\"2082\", \"400\", \"401\", \"402\", # Components - Class M \"302\", \"300\", \"301\", \"351\",", "for i in data: if i == delimiter+'header'+delimiter: continue file = data[i][headerIdentifier] if", "checkFileLinks(data, header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier == -1: print(\"🛑 couldn't locate", "'.join(duplicates)) print(\"------------------------------\") else: print(\"✔️ All documents point to a unique item.\") # We", "Raw \"11\", \"20\", \"21\", \"22\", 
\"23\", \"24\", \"25\", \"26\", \"27\", \"28\", # Resources", "the IDs from itemIDS that are missing in docIDs docSet = set(docIDs) ignoreSet", "print(\"❌ The unique identifier '\"+id+\"' matched \"+str(doubles[id])+\" different lines.\") print(\"❌ Duplicates were found.", "missingDocs = [x for x in itemIDs if x not in docSet and", "Goods \"405\", \"406\", \"407\", \"408\", # Trade Goods \"600\", \"601\", \"602\", \"603\", \"604\",", "# Let's go over all items in docIDs and make sure they're unique", "for line in dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished writing: \"+path) # Takes a", "== delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = [] for i in docs: if i", "as well as the (non)licence can be found at: https://github.com/Katorone/Astrox-Imperium # This script", "doubles else doubles[id] + 1 else: # No duplicate, add the line. data[id]", "locate '\"+identifier+\"' in '\"+source+\"'\") quit() # Parse the items, stored as item[id] data", "Check itemData and docData for duplicate IDs findDuplicateEntries(itemfile, itemData, docfile, docData) # Sanity", "= [x for x in docIDs if x in seen or seen.add(x)] if", "an item/doc exists # - Orphaned documents # Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source", "if len(duplicates) > 0: for id in duplicates: print(\"❌ The unique identifier '\"+id+\"'", "all items have a document # - Check if all documents point to", "\"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"2018\", \"2019\", # Components -", "= getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+itemHeader+\"' in findMissing(),", "delimiter = ';' # List of item IDs that don't have a crafting", "= header # store the header for future use doubles = {} #", "doc would craft: header['docItemId'] = '9 CRAFTS ID' ### End of configuration ###", "= [] for item in data: # data is a 
dictionary-type, which is", "= set(itemIDs) missingItems = [x for x in docIDs if x not in", "\"738\", # Consumables ] ## These settings tell the script which title it", "attached.\") def checkFileLinks(data, header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier == -1: print(\"🛑", "couldn't locate '\"+header+\"' in checkFileLinks(), unable to continue sanity check.\") return haserror =", "\"25\", \"26\", \"27\", \"28\", # Resources - Loot \"29\", \"100\", \"101\", \"103\", \"114\",", "\"2025\", \"2026\", \"2027\", \"2028\", \"2029\", # Components - Class G \"2030\", \"2031\", \"2032\",", "fh = open(path, 'w', encoding='utf8', newline='') for line in dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️", "csv: # - MOD/items/items_database.txt -> itemdb.csv # - MOD/items/specs_database.txt -> docdb.csv # It", "itemIDs if x not in docSet and x not in ignoreSet] if len(missingDocs)", "# Finds the header, which is the last commented line at the start", "header['itemId'] = '1 ITEM ID' # Unique sorting key of documents (specs_database.txt) header['docId']", "not in ignoreSet] if len(missingDocs) > 0: print(\"❌ The following item ID(s) do", "that are uncraftable by design can be added to the 'ignoreUncraftable'-list in itemdb_2_csv.py\")", "\"22\", \"23\", \"24\", \"25\", \"26\", \"27\", \"28\", # Resources - Loot \"29\", \"100\",", "i == delimiter+'header'+delimiter: continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = [] for i in docs: if", "the header, which is the last commented line at the start of a", "\"124\", \"119\", \"123\", \"120\", \"122\", # Materials \"150\", \"151\", \"152\", \"153\", \"164\", \"155\",", "IDs def findDuplicateEntries(fn1, data1, fn2, data2): duplicates = {} for id in data1.keys()", "Life Support - Food \"620\", \"621\", \"622\", \"623\", \"624\", \"625\", \"626\", \"627\", \"628\",", "\"157\", \"158\", \"168\", # Components - Class A \"160\", \"161\", \"162\", \"163\", 
\"154\",", "Trade Goods \"600\", \"601\", \"602\", \"603\", \"604\", \"605\", \"606\", \"607\", \"608\", \"609\", #", "\"668\", \"669\", # Life Support - Waste \"690\", \"670\", \"671\", \"691\", \"672\", \"673\",", "is a dictionary-type, which is guarantueed to be ordered by insertion. joiner =", "if id not in doubles else doubles[id] + 1 else: # No duplicate,", "Warns when an item doesn't have a doc for crafting # - Check", "exist: \"+', '.join(missingItems)) print(\"------------------------------\") else: print(\"✔️ All documents have an existing item attached.\")", "\"106\", \"107\", \"108\", \"110\" , \"2000\", \"111\", \"115\", \"112\", # Materials \"121\", \"117\",", "\"2049\", # Components - Class I \"2050\", \"2051\", \"2052\", \"2053\", \"2054\", \"2055\", \"2056\",", "def getIdentifierIndex(header, identifier): if identifier not in header: return -1 return header.index(identifier) def", "The following item ID(s) have more than one crafting document: \"+', '.join(duplicates)) print(\"------------------------------\")", "in findMissing(), unable to continue sanity check.\") return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if", "script will only use the first match per duplicate.\") print(\"------------------------------\") else: print(\"✔️ There", "# Trade Goods \"320\", \"321\", \"323\", \"325\", \"311\", \"310\", \"312\", \"313\", \"403\", \"404\",", "open(path, 'r', encoding='utf8', newline='\\n') data = fh.readlines() fh.close() return data # Writes a", "\"331\", # Trade Goods \"333\", \"341\", \"342\", \"340\", \"343\", \"303\", \"304\", \"305\", \"322\",", "in docIDs and make sure they're unique seen = set() duplicates = [x", "\"214\", \"215\", \"216\", \"217\", \"218\", # Components - Class D \"219\", \"2001\", \"2002\",", "MOD/items/specs_database.txt -> docdb.csv # It will also do some sanity checking, which should", "reads data from path def readFile(path): fh = open(path, 'r', encoding='utf8', 
newline='\\n') data", "if id in data: # Duplicate checking doubles[id] = 2 if id not", "\"625\", \"626\", \"627\", \"628\", \"629\", # Life Support - Water \"640\", \"641\", \"642\",", "Loot \"29\", \"100\", \"101\", \"103\", \"114\", \"102\", \"104\", \"109\", \"118\", \"113\", # Materials", "id not in doubles else doubles[id] + 1 else: # No duplicate, add", "0: print(\"❌ The following item ID(s) have more than one crafting document: \"+',", "itemData and docData for duplicate IDs findDuplicateEntries(itemfile, itemData, docfile, docData) # Sanity checks:", "\"2044\", \"2045\", \"2046\", \"2047\", \"2048\", \"2049\", # Components - Class I \"2050\", \"2051\",", "be useful for mod & modpack creators: # - Each file can only", "ignored uncraftables).\") # For the orphaned check, we find docIDs that are missing", "print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys in: \"+file) return data def", "\"118\", \"113\", # Materials \"105\", \"106\", \"107\", \"108\", \"110\" , \"2000\", \"111\", \"115\",", "ID(s) do not have a crafting document: \"+', '.join(missingDocs)) print(\" Items that are", "would craft: header['docItemId'] = '9 CRAFTS ID' ### End of configuration ### ###", "doubles: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(doubles[id])+\" different lines.\") print(\"❌ Duplicates were", "\"169\", # Components - Class B \"170\", \"200\", \"201\", \"202\", \"203\", \"204\", \"205\",", "haserror: print(\"✔️ All files in column '\"+header+\"' exist.\") if __name__ == \"__main__\": itemData", "\"332\", \"331\", # Trade Goods \"333\", \"341\", \"342\", \"340\", \"343\", \"303\", \"304\", \"305\",", "composeCsv(docData, 'specs_database.csv') # Check itemData and docData for duplicate IDs findDuplicateEntries(itemfile, itemData, docfile,", "to '\"+file+\"', which doesn't exists.\") if not haserror: print(\"✔️ All files in column", "itemHeader, docs, docsHeader): itemHeaderIdentifier = 
getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader) if itemHeaderIdentifier == -1: print(\"🛑 couldn't", "# - Orphaned documents # Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox", "the 'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️ All items have a crafting document", "well as the (non)licence can be found at: https://github.com/Katorone/Astrox-Imperium # This script exports", "We have 2 lists of IDs, find the IDs from itemIDS that are", "if identifierIndex == -1: print(\"🛑 couldn't locate '\"+identifier+\"' in '\"+source+\"'\") quit() # Parse", "id in data: # Duplicate checking doubles[id] = 2 if id not in", "\"219\", \"2001\", \"2002\", \"2003\", \"2004\", \"2005\", \"2006\", \"2007\", \"2008\", \"2009\", # Components -", "changes this in an update. # Unique sorting key of items (items_database.txt) header['itemId']", "data1, fn2, data2): duplicates = {} for id in data1.keys() & data2.keys(): if", "The following item ID(s) do not have a crafting document: \"+', '.join(missingDocs)) print(\"", "have more than one crafting document: \"+', '.join(duplicates)) print(\"------------------------------\") else: print(\"✔️ All documents", "(the script will warn) # - Warns when an item doesn't have a", "- Raw \"11\", \"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \"27\", \"28\", #", "items have a crafting document attached (with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") # For the", "'w', encoding='utf8', newline='') for line in dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished writing: \"+path)", "\"2046\", \"2047\", \"2048\", \"2049\", # Components - Class I \"2050\", \"2051\", \"2052\", \"2053\",", "the (non)licence can be found at: https://github.com/Katorone/Astrox-Imperium # This script exports 2 files", "\"2018\", \"2019\", # Components - Class F \"2020\", \"2021\", 
\"2022\", \"2023\", \"2024\", \"2025\",", "file)) header = cleanLine(getHeader(lines), '\\t ', ';') identifierIndex = getIdentifierIndex(header, identifier) if identifierIndex", "when an item doesn't have a doc for crafting # - Check if", "docData for duplicate IDs def findDuplicateEntries(fn1, data1, fn2, data2): duplicates = {} for", "if line == \"\": continue # Ignore empty lines id = line[identifierIndex] if", "Finds the header, which is the last commented line at the start of", "\"311\", \"310\", \"312\", \"313\", \"403\", \"404\", # Trade Goods \"405\", \"406\", \"407\", \"408\",", "the item's image header['itemImage'] = '6 icon image' # Name of the document's", "\"2021\", \"2022\", \"2023\", \"2024\", \"2025\", \"2026\", \"2027\", \"2028\", \"2029\", # Components - Class", "id in doubles: print(\"❌ The unique identifier '\"+id+\"' matched \"+str(doubles[id])+\" different lines.\") print(\"❌", "change this, unless Momo changes this in an update. # Unique sorting key", "empty lines id = line[identifierIndex] if id in data: # Duplicate checking doubles[id]", "line[:2] != '//': return data[idx-1][2:] # Gets the index of the identifier from", "'specs_database.txt' # Delimiter to use in the exported csv delimiter = ';' #", "\"735\", \"736\", \"737\", \"738\", # Consumables ] ## These settings tell the script", "Code starts here ### import os # reads data from path def readFile(path):", "# - Check if all documents point to an existing item # -", "print(\"✔️ All documents point to a unique item.\") # We have 2 lists", "\"2022\", \"2023\", \"2024\", \"2025\", \"2026\", \"2027\", \"2028\", \"2029\", # Components - Class G", "data[i][headerIdentifier] if not os.path.isfile(os.path.join(source, file)): haserror = True print(\"❌ Item id '\"+i+\"' links", "\"167\", \"169\", # Components - Class B \"170\", \"200\", \"201\", \"202\", \"203\", \"204\",", "items in docIDs and make sure they're unique seen = set() duplicates =", "in duplicates: print(\"❌ The unique identifier 
'\"+id+\"' matched \"+str(duplicates[id])+\" times in \"+fn1+\" and", "more than one crafting document: \"+', '.join(duplicates)) print(\"------------------------------\") else: print(\"✔️ All documents point", "document: \"+', '.join(missingDocs)) print(\" Items that are uncraftable by design can be added", "def getHeader(data): for idx, line in enumerate(data): if line[:2] != '//': return data[idx-1][2:]", "\"353\", \"350\", \"352\", \"330\", \"332\", \"331\", # Trade Goods \"333\", \"341\", \"342\", \"340\",", "def parseFile(file, identifier, ): lines = readFile(os.path.join(source, file)) header = cleanLine(getHeader(lines), '\\t ',", "which doesn't exists.\") if not haserror: print(\"✔️ All files in column '\"+header+\"' exist.\")", "print(\"------------------------------\") else: print(\"✔️ All documents have an existing item attached.\") def checkFileLinks(data, header):", "\"323\", \"325\", \"311\", \"310\", \"312\", \"313\", \"403\", \"404\", # Trade Goods \"405\", \"406\",", "{} data[delimiter+'header'+delimiter] = header # store the header for future use doubles =", "Ignore comments line = cleanLine(line, '\\t ', ';') if line == \"\": continue", "Checks that the column header[itemId] has en entry in the column header[docItemId] def", "at: https://github.com/Katorone/Astrox-Imperium # This script exports 2 files to a csv: # -", "item sanityCheck(itemData, header[\"itemId\"], docData, header[\"docItemId\"]) # Check if the .png for an item/doc", "if headerIdentifier == -1: print(\"🛑 couldn't locate '\"+header+\"' in checkFileLinks(), unable to continue", "parseFile(file, identifier, ): lines = readFile(os.path.join(source, file)) header = cleanLine(getHeader(lines), '\\t ', ';')", "in data: if i == delimiter+'header'+delimiter: continue file = data[i][headerIdentifier] if not os.path.isfile(os.path.join(source,", "if not os.path.isfile(os.path.join(source, file)): haserror = True print(\"❌ Item id '\"+i+\"' links to", "crafting document ignoreUncraftable = 
[ \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\",", "target): lines = [] for item in data: # data is a dictionary-type,", "# Consumables ] ## These settings tell the script which title it needs", "the ID that are duplicates for line in lines: if line[:2] == '//':", "check.\") return haserror = False for i in data: if i == delimiter+'header'+delimiter:", "= 'specs_database.txt' # Delimiter to use in the exported csv delimiter = ';'", "if len(doubles) > 0: for id in doubles: print(\"❌ The unique identifier '\"+id+\"'", "have an existing item attached.\") def checkFileLinks(data, header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header) if", "key of items (items_database.txt) header['itemId'] = '1 ITEM ID' # Unique sorting key", "Materials \"105\", \"106\", \"107\", \"108\", \"110\" , \"2000\", \"111\", \"115\", \"112\", # Materials", "\"604\", \"605\", \"606\", \"607\", \"608\", \"609\", # Life Support - Food \"620\", \"621\",", "header) if headerIdentifier == -1: print(\"🛑 couldn't locate '\"+header+\"' in checkFileLinks(), unable to", "Trade Goods \"320\", \"321\", \"323\", \"325\", \"311\", \"310\", \"312\", \"313\", \"403\", \"404\", #", "'specs_database.csv') # Check itemData and docData for duplicate IDs findDuplicateEntries(itemfile, itemData, docfile, docData)", "& data2.keys(): if id == delimiter+'header'+delimiter: continue duplicates[id] = 2 if id not", "itemIDs itemSet = set(itemIDs) missingItems = [x for x in docIDs if x", "return line if line[-1] == delim: line = line[0:-1] return [x.strip(strip) for x", "'\"+identifier+\"' in '\"+source+\"'\") quit() # Parse the items, stored as item[id] data =", "a doc for crafting # - Check if the .png for an item/doc", "# Unique sorting key of items (items_database.txt) header['itemId'] = '1 ITEM ID' #", "0: print(\"❌ The following item ID(s) have a crafting document, but the item", "to an existing item # - Check if all documents point to a", "the first match) # - Every ID 
between items and documents needs to", "\"601\", \"602\", \"603\", \"604\", \"605\", \"606\", \"607\", \"608\", \"609\", # Life Support -", "# Ignore empty lines id = line[identifierIndex] if id in data: # Duplicate", "not have a crafting document: \"+', '.join(missingDocs)) print(\" Items that are uncraftable by", "\"321\", \"323\", \"325\", \"311\", \"310\", \"312\", \"313\", \"403\", \"404\", # Trade Goods \"405\",", "# List of item IDs that don't have a crafting document ignoreUncraftable =", "'\"+header+\"' in checkFileLinks(), unable to continue sanity check.\") return haserror = False for", "match per duplicate.\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys in: \"+file)", "in ignoreSet] if len(missingDocs) > 0: print(\"❌ The following item ID(s) do not", "';' # List of item IDs that don't have a crafting document ignoreUncraftable", "MOD/items/items_database.txt -> itemdb.csv # - MOD/items/specs_database.txt -> docdb.csv # It will also do", "\"623\", \"624\", \"625\", \"626\", \"627\", \"628\", \"629\", # Life Support - Water \"640\",", "\"676\", \"677\", \"700\", \"678\", \"679\", \"701\", \"680\", \"681\", \"710\", \"711\", # Consumables \"712\",", "\"113\", # Materials \"105\", \"106\", \"107\", \"108\", \"110\" , \"2000\", \"111\", \"115\", \"112\",", "itemIDS that are missing in docIDs docSet = set(docIDs) ignoreSet = set(ignoreUncraftable) missingDocs", "only contain the first match) # - Every ID between items and documents", "the exported csv delimiter = ';' # List of item IDs that don't", "windows: c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile = 'items_database.txt' docfile = 'specs_database.txt'", "\"350\", \"352\", \"330\", \"332\", \"331\", # Trade Goods \"333\", \"341\", \"342\", \"340\", \"343\",", "2 if id not in doubles else doubles[id] + 1 else: # No", "\"400\", \"401\", \"402\", # Components - Class M 
\"302\", \"300\", \"301\", \"351\", \"353\",", "returns a list def cleanLine(line, strip, delim): line = line.strip() if line ==", "for duplicate IDs findDuplicateEntries(itemfile, itemData, docfile, docData) # Sanity checks: # - Check", "identifier): if identifier not in header: return -1 return header.index(identifier) def parseFile(file, identifier,", "sanity checking, which should be useful for mod & modpack creators: # -", "for id in data1.keys() & data2.keys(): if id == delimiter+'header'+delimiter: continue duplicates[id] =", "\"\": continue # Ignore empty lines id = line[identifierIndex] if id in data:", "Class G \"2030\", \"2031\", \"2032\", \"2033\", \"2034\", \"2035\", \"2036\", \"2037\", \"2038\", \"2039\", #", "document, but the item does not exist: \"+', '.join(missingItems)) print(\"------------------------------\") else: print(\"✔️ All", "image' # The item ID that a doc would craft: header['docItemId'] = '9", "duplicates for line in lines: if line[:2] == '//': continue # Ignore comments", "== '//': continue # Ignore comments line = cleanLine(line, '\\t ', ';') if", "\"+file) return data def composeCsv(data, target): lines = [] for item in data:", "\"2024\", \"2025\", \"2026\", \"2027\", \"2028\", \"2029\", # Components - Class G \"2030\", \"2031\",", "# Name of the document's image header['docImage'] = '6 doc image' # The", "future use doubles = {} # stores the ID that are duplicates for", "Life Support - Thermal \"660\", \"661\", \"662\", \"663\", \"664\", \"665\", \"666\", \"667\", \"668\",", "# reads data from path def readFile(path): fh = open(path, 'r', encoding='utf8', newline='\\n')", "doc for crafting # - Check if the .png for an item/doc exists", "\"203\", \"204\", \"205\", \"206\", \"207\", \"208\", # Components - Class C \"209\", \"210\",", "Writes a list of data to path def writeFile(path, dataList): fh = open(path,", "line.split(delim)] # Finds the header, which is the last commented line at the", "a list of data to path def 
writeFile(path, dataList): fh = open(path, 'w',", "composeCsv(data, target): lines = [] for item in data: # data is a", "else duplicates[id] + 1 if len(duplicates) > 0: for id in duplicates: print(\"❌", "= getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier == -1: print(\"🛑 couldn't locate '\"+header+\"' in checkFileLinks(),", "'6 doc image' # The item ID that a doc would craft: header['docItemId']", "docIDs if x in seen or seen.add(x)] if len(duplicates) > 0: print(\"❌ The", "\"609\", # Life Support - Food \"620\", \"621\", \"622\", \"623\", \"624\", \"625\", \"626\",", "x in seen or seen.add(x)] if len(duplicates) > 0: print(\"❌ The following item", "= [ \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", #", "Materials \"150\", \"151\", \"152\", \"153\", \"164\", \"155\", \"156\", \"157\", \"158\", \"168\", # Components", "seen or seen.add(x)] if len(duplicates) > 0: print(\"❌ The following item ID(s) have", "\"700\", \"678\", \"679\", \"701\", \"680\", \"681\", \"710\", \"711\", # Consumables \"712\", \"702\", \"703\",", "not exist: \"+', '.join(missingItems)) print(\"------------------------------\") else: print(\"✔️ All documents have an existing item", "which should be useful for mod & modpack creators: # - Each file", "\"711\", # Consumables \"712\", \"702\", \"703\", \"735\", \"736\", \"737\", \"738\", # Consumables ]", "existing item # - Check if all documents point to a unique item", "\"+fn2+\".\") print(\"❌ Duplicate IDs were found across \"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\") else: print(\"✔️", "checking, which should be useful for mod & modpack creators: # - Each", "if line[-1] == delim: line = line[0:-1] return [x.strip(strip) for x in line.split(delim)]", "continue itemIDs.append(items[i][itemHeaderIdentifier]) docIDs = [] for i in docs: if i == delimiter+'header'+delimiter:", "for duplicate IDs def findDuplicateEntries(fn1, data1, fn2, data2): duplicates = {} for id", 
"Example for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile = 'items_database.txt' docfile", "\"204\", \"205\", \"206\", \"207\", \"208\", # Components - Class C \"209\", \"210\", \"211\",", "use in the exported csv delimiter = ';' # List of item IDs", "item ID(s) have a crafting document, but the item does not exist: \"+',", "- Class A \"160\", \"161\", \"162\", \"163\", \"154\", \"159\", \"165\", \"166\", \"167\", \"169\",", "are duplicates for line in lines: if line[:2] == '//': continue # Ignore", "can only contain unique IDs (the exported csv will only contain the first", "tell the script which title it needs to look for when examining data.", "for when examining data. header = {} # You probably won't need to", "Duplicates were found. The script will only use the first match per duplicate.\")", "unique identifier '\"+id+\"' matched \"+str(doubles[id])+\" different lines.\") print(\"❌ Duplicates were found. 
The script", "Delimiter to use in the exported csv delimiter = ';' # List of", "\"111\", \"115\", \"112\", # Materials \"121\", \"117\", \"116\", \"124\", \"119\", \"123\", \"120\", \"122\",", "ID that a doc would craft: header['docItemId'] = '9 CRAFTS ID' ### End", "\"662\", \"663\", \"664\", \"665\", \"666\", \"667\", \"668\", \"669\", # Life Support - Waste", "header[\"itemId\"], docData, header[\"docItemId\"]) # Check if the .png for an item/doc exists checkFileLinks(itemData,", "and x not in ignoreSet] if len(missingDocs) > 0: print(\"❌ The following item", "itemHeader) if itemHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+itemHeader+\"' in findMissing(), unable to", "keys across: \"+fn1+\" and \"+fn2+\".\") # Checks that the column header[itemId] has en", "sanity check.\") return itemIDs = [] for i in items: if i ==", "\"667\", \"668\", \"669\", # Life Support - Waste \"690\", \"670\", \"671\", \"691\", \"672\",", "> 0: print(\"❌ The following item ID(s) do not have a crafting document:", "the script which title it needs to look for when examining data. 
header", "= set(docIDs) ignoreSet = set(ignoreUncraftable) missingDocs = [x for x in itemIDs if", "docData = parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv') # Check itemData and docData for duplicate", "\"664\", \"665\", \"666\", \"667\", \"668\", \"669\", # Life Support - Waste \"690\", \"670\",", "the identifier from the header[list] def getIdentifierIndex(header, identifier): if identifier not in header:", "The following item ID(s) have a crafting document, but the item does not", "Sanity checks: # - Check if all items have a document # -", "Components - Class I \"2050\", \"2051\", \"2052\", \"2053\", \"2054\", \"2055\", \"2056\", \"2057\", \"2058\",", "line if len(doubles) > 0: for id in doubles: print(\"❌ The unique identifier", "\"212\", \"213\", \"214\", \"215\", \"216\", \"217\", \"218\", # Components - Class D \"219\",", "use the first match per duplicate.\") print(\"------------------------------\") else: print(\"✔️ There were no duplicate", "header[\"docItemId\"]) # Check if the .png for an item/doc exists checkFileLinks(itemData, header[\"itemImage\"]) checkFileLinks(docData,", "\"661\", \"662\", \"663\", \"664\", \"665\", \"666\", \"667\", \"668\", \"669\", # Life Support -", "== -1: print(\"🛑 couldn't locate '\"+header+\"' in checkFileLinks(), unable to continue sanity check.\")", "print(\"🛑 couldn't locate '\"+itemHeader+\"' in findMissing(), unable to continue sanity check.\") return docsHeaderIdentifier", "# Gets the index of the identifier from the header[list] def getIdentifierIndex(header, identifier):", "line = cleanLine(line, '\\t ', ';') if line == \"\": continue # Ignore", "unable to continue sanity check.\") return docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader) if docsHeaderIdentifier ==", "Support - Water \"640\", \"641\", \"642\", \"643\", \"644\", \"645\", \"646\", \"647\", \"648\", \"649\",", "be found at: https://github.com/Katorone/Astrox-Imperium # This script 
exports 2 files to a csv:", "\"2028\", \"2029\", # Components - Class G \"2030\", \"2031\", \"2032\", \"2033\", \"2034\", \"2035\",", "(the exported csv will only contain the first match) # - Every ID", "# Components - Class F \"2020\", \"2021\", \"2022\", \"2023\", \"2024\", \"2025\", \"2026\", \"2027\",", "id not in duplicates else duplicates[id] + 1 if len(duplicates) > 0: for", "All documents have an existing item attached.\") def checkFileLinks(data, header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter],", "== delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go over all items in docIDs and", "\"679\", \"701\", \"680\", \"681\", \"710\", \"711\", # Consumables \"712\", \"702\", \"703\", \"735\", \"736\",", "sanityCheck(itemData, header[\"itemId\"], docData, header[\"docItemId\"]) # Check if the .png for an item/doc exists", "Unique sorting key of items (items_database.txt) header['itemId'] = '1 ITEM ID' # Unique", "\"2027\", \"2028\", \"2029\", # Components - Class G \"2030\", \"2031\", \"2032\", \"2033\", \"2034\",", "# Components - Class I \"2050\", \"2051\", \"2052\", \"2053\", \"2054\", \"2055\", \"2056\", \"2057\",", "Name of the item's image header['itemImage'] = '6 icon image' # Name of", "an update. 
# Unique sorting key of items (items_database.txt) header['itemId'] = '1 ITEM", "ignoreUncraftable = [ \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\",", "icon image' # Name of the document's image header['docImage'] = '6 doc image'", "docs: if i == delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go over all items", "\"116\", \"124\", \"119\", \"123\", \"120\", \"122\", # Materials \"150\", \"151\", \"152\", \"153\", \"164\",", "print(\"❌ The following item ID(s) do not have a crafting document: \"+', '.join(missingDocs))", "find docIDs that are missing in itemIDs itemSet = set(itemIDs) missingItems = [x", "\"26\", \"27\", \"28\", # Resources - Loot \"29\", \"100\", \"101\", \"103\", \"114\", \"102\",", "ID' # Unique sorting key of documents (specs_database.txt) header['docId'] = '1 DOC ID'", "Support - Food \"620\", \"621\", \"622\", \"623\", \"624\", \"625\", \"626\", \"627\", \"628\", \"629\",", "ordered by insertion. joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) # Check itemData and", "# Trade Goods \"600\", \"601\", \"602\", \"603\", \"604\", \"605\", \"606\", \"607\", \"608\", \"609\",", "are missing in itemIDs itemSet = set(itemIDs) missingItems = [x for x in", "\"2051\", \"2052\", \"2053\", \"2054\", \"2055\", \"2056\", \"2057\", \"2058\", \"2059\", # Components - Class", "def readFile(path): fh = open(path, 'r', encoding='utf8', newline='\\n') data = fh.readlines() fh.close() return", "be ordered by insertion. 
joiner = '\"'+delimiter+'\"' lines.append('\"'+joiner.join(data[item])+'\"') writeFile(target, lines) # Check itemData", "# Components - Class E \"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\",", "locate '\"+header+\"' in checkFileLinks(), unable to continue sanity check.\") return haserror = False", "mod & modpack creators: # - Each file can only contain unique IDs", "if itemHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+itemHeader+\"' in findMissing(), unable to continue", "\"324\", # Trade Goods \"320\", \"321\", \"323\", \"325\", \"311\", \"310\", \"312\", \"313\", \"403\",", "return data def composeCsv(data, target): lines = [] for item in data: #", "strip, delim): line = line.strip() if line == \"\": return line if line[-1]", "= open(path, 'w', encoding='utf8', newline='') for line in dataList: fh.write(line+'\\r\\n') fh.close() print(\"✔️ Finished", "for windows: c:\\path\\to\\Astrox\\MOD\\items\\ source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/' itemfile = 'items_database.txt' docfile =", "seen = set() duplicates = [x for x in docIDs if x in", "\"+', '.join(duplicates)) print(\"------------------------------\") else: print(\"✔️ All documents point to a unique item.\") #", "', ';') if line == \"\": continue # Ignore empty lines id =", "!= '//': return data[idx-1][2:] # Gets the index of the identifier from the", "if i == delimiter+'header'+delimiter: continue file = data[i][headerIdentifier] if not os.path.isfile(os.path.join(source, file)): haserror", "a doc would craft: header['docItemId'] = '9 CRAFTS ID' ### End of configuration", "\"710\", \"711\", # Consumables \"712\", \"702\", \"703\", \"735\", \"736\", \"737\", \"738\", # Consumables", "\"2004\", \"2005\", \"2006\", \"2007\", \"2008\", \"2009\", # Components - Class E \"2010\", \"2011\",", "an item doesn't have a doc for crafting # - Check if the", "getIdentifierIndex(header, identifier): if identifier not in 
header: return -1 return header.index(identifier) def parseFile(file,", "will only use the first match per duplicate.\") print(\"------------------------------\") else: print(\"✔️ There were", "= open(path, 'r', encoding='utf8', newline='\\n') data = fh.readlines() fh.close() return data # Writes", "will only contain the first match) # - Every ID between items and", "csv delimiter = ';' # List of item IDs that don't have a", "return data[idx-1][2:] # Gets the index of the identifier from the header[list] def", "in data: # data is a dictionary-type, which is guarantueed to be ordered", "- Class J \"2080\", \"2081\", \"2082\", \"400\", \"401\", \"402\", # Components - Class", "an existing item attached.\") def checkFileLinks(data, header): headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header) if headerIdentifier", "that are missing in docIDs docSet = set(docIDs) ignoreSet = set(ignoreUncraftable) missingDocs =", "itemHeaderIdentifier == -1: print(\"🛑 couldn't locate '\"+itemHeader+\"' in findMissing(), unable to continue sanity", "docIDs docSet = set(docIDs) ignoreSet = set(ignoreUncraftable) missingDocs = [x for x in", "in \"+fn1+\" and \"+fn2+\".\") print(\"❌ Duplicate IDs were found across \"+fn1+\" and \"+fn2+\".\")", "orphaned check, we find docIDs that are missing in itemIDs itemSet = set(itemIDs)", "i in docs: if i == delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go over", "-> docdb.csv # It will also do some sanity checking, which should be", "continue duplicates[id] = 2 if id not in duplicates else duplicates[id] + 1", "a dictionary-type, which is guarantueed to be ordered by insertion. 
joiner = '\"'+delimiter+'\"'", "document attached (with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") # For the orphaned check, we find", "i == delimiter+'header'+delimiter: continue docIDs.append(docs[i][docsHeaderIdentifier]) # Let's go over all items in docIDs", "\"208\", # Components - Class C \"209\", \"210\", \"211\", \"212\", \"213\", \"214\", \"215\",", "[x for x in itemIDs if x not in docSet and x not", "print(\"✔️ All items have a crafting document attached (with \"+str(len(ignoreUncraftable))+\" ignored uncraftables).\") #", "continue sanity check.\") return itemIDs = [] for i in items: if i", "\"+', '.join(missingDocs)) print(\" Items that are uncraftable by design can be added to", "\"5\", \"6\", \"7\", \"8\", \"9\", \"10\", # Resources - Raw \"11\", \"20\", \"21\",", "# Components - Class B \"170\", \"200\", \"201\", \"202\", \"203\", \"204\", \"205\", \"206\",", "of the document's image header['docImage'] = '6 doc image' # The item ID", "IDs were found across \"+fn1+\" and \"+fn2+\".\") print(\"------------------------------\") else: print(\"✔️ There were no", "set(itemIDs) missingItems = [x for x in docIDs if x not in itemSet]", "in docSet and x not in ignoreSet] if len(missingDocs) > 0: print(\"❌ The", "print(\"------------------------------\") else: print(\"✔️ There were no duplicate keys across: \"+fn1+\" and \"+fn2+\".\") #", "\"403\", \"404\", # Trade Goods \"405\", \"406\", \"407\", \"408\", # Trade Goods \"600\",", "be added to the 'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️ All items have", "- MOD/items/specs_database.txt -> docdb.csv # It will also do some sanity checking, which", "not in header: return -1 return header.index(identifier) def parseFile(file, identifier, ): lines =", "\"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", # Resources - Raw \"11\", \"20\",", "should be useful for mod & modpack creators: # - Each file can", "\"674\", \"675\", \"693\", # 
Consumables \"676\", \"677\", \"700\", \"678\", \"679\", \"701\", \"680\", \"681\",", "\"__main__\": itemData = parseFile(itemfile, header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData = parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv')", "header[\"itemId\"]) composeCsv(itemData, 'items_database.csv') docData = parseFile(docfile, header[\"docId\"]) composeCsv(docData, 'specs_database.csv') # Check itemData and", "line if line[-1] == delim: line = line[0:-1] return [x.strip(strip) for x in", "\"2045\", \"2046\", \"2047\", \"2048\", \"2049\", # Components - Class I \"2050\", \"2051\", \"2052\",", "\"408\", # Trade Goods \"600\", \"601\", \"602\", \"603\", \"604\", \"605\", \"606\", \"607\", \"608\",", "# Consumables \"676\", \"677\", \"700\", \"678\", \"679\", \"701\", \"680\", \"681\", \"710\", \"711\", #", "== -1: print(\"🛑 couldn't locate '\"+docsHeader+\"' in findMissing(), unable to continue sanity check.\")", "design can be added to the 'ignoreUncraftable'-list in itemdb_2_csv.py\") print(\"------------------------------\") else: print(\"✔️ All", "False for i in data: if i == delimiter+'header'+delimiter: continue file = data[i][headerIdentifier]", "in docIDs if x in seen or seen.add(x)] if len(duplicates) > 0: print(\"❌", "\"302\", \"300\", \"301\", \"351\", \"353\", \"350\", \"352\", \"330\", \"332\", \"331\", # Trade Goods", "were no duplicate keys across: \"+fn1+\" and \"+fn2+\".\") # Checks that the column", "';') identifierIndex = getIdentifierIndex(header, identifier) if identifierIndex == -1: print(\"🛑 couldn't locate '\"+identifier+\"'", "True print(\"❌ Item id '\"+i+\"' links to '\"+file+\"', which doesn't exists.\") if not", "data = fh.readlines() fh.close() return data # Writes a list of data to", "lines = [] for item in data: # data is a dictionary-type, which", "docSet and x not in ignoreSet] if len(missingDocs) > 0: print(\"❌ The following", "\"602\", \"603\", \"604\", \"605\", \"606\", \"607\", 
\"608\", \"609\", # Life Support - Food", "all documents point to a unique item sanityCheck(itemData, header[\"itemId\"], docData, header[\"docItemId\"]) # Check" ]
[ "from django.apps import apps from contact.apps import ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\" Test app", "from contact.apps import ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\" Test app config \"\"\" def test_apps(self):", "import SimpleTestCase from django.apps import apps from contact.apps import ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\"", "django.test import SimpleTestCase from django.apps import apps from contact.apps import ContactConfig class ContactConfigTestCase(SimpleTestCase):", "contact.apps import ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\" Test app config \"\"\" def test_apps(self): self.assertEqual(ContactConfig.name,", "<reponame>uktrade/dit-contact-forms from django.test import SimpleTestCase from django.apps import apps from contact.apps import ContactConfig", "django.apps import apps from contact.apps import ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\" Test app config", "apps from contact.apps import ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\" Test app config \"\"\" def", "ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\" Test app config \"\"\" def test_apps(self): self.assertEqual(ContactConfig.name, \"contact\") self.assertEqual(apps.get_app_config(\"contact\").name,", "from django.test import SimpleTestCase from django.apps import apps from contact.apps import ContactConfig class", "import apps from contact.apps import ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\" Test app config \"\"\"", "import ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\" Test app config \"\"\" def test_apps(self): self.assertEqual(ContactConfig.name, \"contact\")", "class ContactConfigTestCase(SimpleTestCase): \"\"\" Test app config \"\"\" def test_apps(self): self.assertEqual(ContactConfig.name, \"contact\") self.assertEqual(apps.get_app_config(\"contact\").name, \"contact\")", 
"SimpleTestCase from django.apps import apps from contact.apps import ContactConfig class ContactConfigTestCase(SimpleTestCase): \"\"\" Test" ]
[ ".items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [ k8s_tag for k8s_tag in k8s_tag_list", "[ k8s_tag for k8s_tag in k8s_tag_list if k8s_tag ] def delete_k8s_object(label): k8s_object_list =", "= check_output(\"kubectl get pods -o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no", "('kubectl delete '+ k8s_object +' --selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete", "= [ tag for tag in active_k8s_tag_list if tag != \"\" ] deletable_tags", "k8s tag. github_token = os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token)", "'__main__': for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()): delete_k8s_object(k8s_tag_to_delete) print('k8s objects with label branch=cdtn-'+k8s_tag_to_delete+' have", "active_k8s_tag_list if tag != \"\" ] deletable_tags = [ tag for tag in", "branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete = [] active_tags = [ tag", "__name__ == '__main__': for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()): delete_k8s_object(k8s_tag_to_delete) print('k8s objects with label", "the active remote branches and active k8s tags. # If a k8s tag", "This script compares the active remote branches and active k8s tags. 
# If", "check_output(\"kubectl get pods -o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-')", "the k8s objects with this k8s tag. github_token = os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"])", "k8s_tag ] def delete_k8s_object(label): k8s_object_list = [\"service\", \"ingress\", \"configmap\", \"deployments\", \"statefulset\", \"pod\"] for", "tag != \"\" ] deletable_tags = [ tag for tag in active_tags if", "return [ k8s_tag for k8s_tag in k8s_tag_list if k8s_tag ] def delete_k8s_object(label): k8s_object_list", "active_tags = [ tag for tag in active_k8s_tag_list if tag != \"\" ]", "k8s_tag for k8s_tag in k8s_tag_list if k8s_tag ] def delete_k8s_object(label): k8s_object_list = [\"service\",", "if tag not in active_branch_list ] for tag in deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete", "for branch in json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size] for branche in active_branches ] def", "= [ tag for tag in active_tags if tag not in active_branch_list ]", "for tag in active_tags if tag not in active_branch_list ] for tag in", "command_to_delete_k8s_object = ('kubectl delete '+ k8s_object +' --selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[],", "= int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url, None, {\"token\": github_token})", "url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url, None, {\"token\": github_token}) response = request.urlopen(req) active_branches", "get pods -o go-template --template '{{range 
.items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return", "\"deployments\", \"statefulset\", \"pod\"] for k8s_object in k8s_object_list: command_to_delete_k8s_object = ('kubectl delete '+ k8s_object", "[ tag for tag in active_tags if tag not in active_branch_list ] for", "= ('kubectl delete '+ k8s_object +' --selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]):", "!= \"\" ] deletable_tags = [ tag for tag in active_tags if tag", "in k8s_object_list: command_to_delete_k8s_object = ('kubectl delete '+ k8s_object +' --selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True)", "request # This script compares the active remote branches and active k8s tags.", "= raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [ k8s_tag for k8s_tag in k8s_tag_list if k8s_tag ]", "tag in active_k8s_tag_list if tag != \"\" ] deletable_tags = [ tag for", "from subprocess import check_output import hashlib import os import json from urllib import", "branches name's, we delete all the k8s objects with this k8s tag. 
github_token", "hashed remote branches name's, we delete all the k8s objects with this k8s", "delete '+ k8s_object +' --selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete =", "go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [ k8s_tag for", "= request.Request(url, None, {\"token\": github_token}) response = request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode() for branch", "= \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url, None, {\"token\": github_token}) response = request.urlopen(req) active_branches =", "json from urllib import request # This script compares the active remote branches", "urllib import request # This script compares the active remote branches and active", "k8s_tag_list if k8s_tag ] def delete_k8s_object(label): k8s_object_list = [\"service\", \"ingress\", \"configmap\", \"deployments\", \"statefulset\",", "raw_k8s_tag_list = check_output(\"kubectl get pods -o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list =", "= [branch.get(\"head\").get(\"ref\").encode() for branch in json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size] for branche in active_branches", "k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()): delete_k8s_object(k8s_tag_to_delete) print('k8s objects with label branch=cdtn-'+k8s_tag_to_delete+' have been deleted')", "subprocess import check_output import hashlib import os import json from urllib import request", "objects with this k8s tag. 
github_token = os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"]) def get_active_branches():", "k8s tags. # If a k8s tag doesn't match an active hashed remote", "import json from urllib import request # This script compares the active remote", "k8s_tag_list_to_delete if __name__ == '__main__': for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()): delete_k8s_object(k8s_tag_to_delete) print('k8s objects", "{\"token\": github_token}) response = request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode() for branch in json.loads(response.read())] return", "--template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [ k8s_tag for k8s_tag", "active k8s tags. # If a k8s tag doesn't match an active hashed", "= [] active_tags = [ tag for tag in active_k8s_tag_list if tag !=", "hash_size = int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url, None, {\"token\":", "tag not in active_branch_list ] for tag in deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if", "active_branches ] def get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl get pods -o go-template --template '{{range", "request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode() for branch in json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size] for branche", "in active_branches ] def get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl get pods -o go-template --template", "get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete = [] active_tags = [ tag for tag in active_k8s_tag_list", "for k8s_tag in k8s_tag_list if k8s_tag ] def 
delete_k8s_object(label): k8s_object_list = [\"service\", \"ingress\",", "import request # This script compares the active remote branches and active k8s", "k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [ k8s_tag for k8s_tag in k8s_tag_list if k8s_tag", "request.Request(url, None, {\"token\": github_token}) response = request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode() for branch in", "for k8s_object in k8s_object_list: command_to_delete_k8s_object = ('kubectl delete '+ k8s_object +' --selector branch=cdtn-'+label)", "[ tag for tag in active_k8s_tag_list if tag != \"\" ] deletable_tags =", "import hashlib import os import json from urllib import request # This script", "'{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [ k8s_tag for k8s_tag in", "check_output import hashlib import os import json from urllib import request # This", "tag for tag in active_tags if tag not in active_branch_list ] for tag", "match an active hashed remote branches name's, we delete all the k8s objects", "tags. # If a k8s tag doesn't match an active hashed remote branches", "[\"service\", \"ingress\", \"configmap\", \"deployments\", \"statefulset\", \"pod\"] for k8s_object in k8s_object_list: command_to_delete_k8s_object = ('kubectl", "hashlib import os import json from urllib import request # This script compares", "with this k8s tag. github_token = os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url", "this k8s tag. github_token = os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url =", "if __name__ == '__main__': for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()): delete_k8s_object(k8s_tag_to_delete) print('k8s objects with", "branches and active k8s tags. 
# If a k8s tag doesn't match an", "not in active_branch_list ] for tag in deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if __name__", "active_branch_list=[]): k8s_tag_list_to_delete = [] active_tags = [ tag for tag in active_k8s_tag_list if", "script compares the active remote branches and active k8s tags. # If a", "= os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url,", "active hashed remote branches name's, we delete all the k8s objects with this", "k8s_object_list = [\"service\", \"ingress\", \"configmap\", \"deployments\", \"statefulset\", \"pod\"] for k8s_object in k8s_object_list: command_to_delete_k8s_object", "+' --selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete = [] active_tags =", "tag in active_tags if tag not in active_branch_list ] for tag in deletable_tags:", "for tag in deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if __name__ == '__main__': for k8s_tag_to_delete", "branche in active_branches ] def get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl get pods -o go-template", "k8s tag doesn't match an active hashed remote branches name's, we delete all", "\"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url, None, {\"token\": github_token}) response = request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode()", "shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [ k8s_tag for k8s_tag in k8s_tag_list if", "'+ k8s_object +' --selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def 
get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete = []", "[] active_tags = [ tag for tag in active_k8s_tag_list if tag != \"\"", "hashlib.sha1(branche).hexdigest()[:hash_size] for branche in active_branches ] def get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl get pods", "= [\"service\", \"ingress\", \"configmap\", \"deployments\", \"statefulset\", \"pod\"] for k8s_object in k8s_object_list: command_to_delete_k8s_object =", "If a k8s tag doesn't match an active hashed remote branches name's, we", "for tag in active_k8s_tag_list if tag != \"\" ] deletable_tags = [ tag", "check_output(command_to_delete_k8s_object, shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete = [] active_tags = [ tag for", "github_token = os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req =", "if k8s_tag ] def delete_k8s_object(label): k8s_object_list = [\"service\", \"ingress\", \"configmap\", \"deployments\", \"statefulset\", \"pod\"]", "raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [ k8s_tag for k8s_tag in k8s_tag_list if k8s_tag ] def", "active_branches = [branch.get(\"head\").get(\"ref\").encode() for branch in json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size] for branche in", "return [ hashlib.sha1(branche).hexdigest()[:hash_size] for branche in active_branches ] def get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl", "value>','').split('cdtn-') return [ k8s_tag for k8s_tag in k8s_tag_list if k8s_tag ] def delete_k8s_object(label):", "k8s_object_list: command_to_delete_k8s_object = ('kubectl delete '+ k8s_object +' --selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def", "import os import json from urllib import request 
# This script compares the", "def get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url, None, {\"token\": github_token}) response =", "<gh_stars>0 from subprocess import check_output import hashlib import os import json from urllib", "= request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode() for branch in json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size] for", "in active_tags if tag not in active_branch_list ] for tag in deletable_tags: k8s_tag_list_to_delete.append(tag)", "and active k8s tags. # If a k8s tag doesn't match an active", "def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete = [] active_tags = [ tag for tag in", "deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if __name__ == '__main__': for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()):", "an active hashed remote branches name's, we delete all the k8s objects with", "response = request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode() for branch in json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size]", "def delete_k8s_object(label): k8s_object_list = [\"service\", \"ingress\", \"configmap\", \"deployments\", \"statefulset\", \"pod\"] for k8s_object in", "k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if __name__ == '__main__': for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()): delete_k8s_object(k8s_tag_to_delete)", "\"pod\"] for k8s_object in k8s_object_list: command_to_delete_k8s_object = ('kubectl delete '+ k8s_object +' --selector", "remote branches name's, we delete all the k8s objects with this k8s tag.", "tag for tag in active_k8s_tag_list if tag != \"\" ] deletable_tags = [", "k8s_object +' --selector 
branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete = [] active_tags", "\"\" ] deletable_tags = [ tag for tag in active_tags if tag not", "branch in json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size] for branche in active_branches ] def get_active_k8s_tags():", "for branche in active_branches ] def get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl get pods -o", "\"ingress\", \"configmap\", \"deployments\", \"statefulset\", \"pod\"] for k8s_object in k8s_object_list: command_to_delete_k8s_object = ('kubectl delete", "we delete all the k8s objects with this k8s tag. github_token = os.environ[\"GITHUB_TOKEN\"]", "tag doesn't match an active hashed remote branches name's, we delete all the", "name's, we delete all the k8s objects with this k8s tag. github_token =", "active_branch_list ] for tag in deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if __name__ == '__main__':", "active_tags if tag not in active_branch_list ] for tag in deletable_tags: k8s_tag_list_to_delete.append(tag) return", "== '__main__': for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()): delete_k8s_object(k8s_tag_to_delete) print('k8s objects with label branch=cdtn-'+k8s_tag_to_delete+'", "] for tag in deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if __name__ == '__main__': for", "os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url, None,", "import check_output import hashlib import os import json from urllib import request #", "k8s_tag_list_to_delete = [] active_tags = [ tag for tag in active_k8s_tag_list if tag", "if tag != \"\" ] deletable_tags = [ 
tag for tag in active_tags", "] deletable_tags = [ tag for tag in active_tags if tag not in", "from urllib import request # This script compares the active remote branches and", "k8s_object in k8s_object_list: command_to_delete_k8s_object = ('kubectl delete '+ k8s_object +' --selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object,", "# If a k8s tag doesn't match an active hashed remote branches name's,", "req = request.Request(url, None, {\"token\": github_token}) response = request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode() for", "shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete = [] active_tags = [ tag for tag", "deletable_tags = [ tag for tag in active_tags if tag not in active_branch_list", "k8s_tag in k8s_tag_list if k8s_tag ] def delete_k8s_object(label): k8s_object_list = [\"service\", \"ingress\", \"configmap\",", "remote branches and active k8s tags. # If a k8s tag doesn't match", "tag. github_token = os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req", "None, {\"token\": github_token}) response = request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode() for branch in json.loads(response.read())]", "k8s objects with this k8s tag. 
github_token = os.environ[\"GITHUB_TOKEN\"] hash_size = int(os.environ[\"HASH_SIZE\"]) def", "] def get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl get pods -o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\",", "--selector branch=cdtn-'+label) check_output(command_to_delete_k8s_object, shell=True) def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]): k8s_tag_list_to_delete = [] active_tags = [", "pods -o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [", "in active_branch_list ] for tag in deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if __name__ ==", "delete_k8s_object(label): k8s_object_list = [\"service\", \"ingress\", \"configmap\", \"deployments\", \"statefulset\", \"pod\"] for k8s_object in k8s_object_list:", "delete all the k8s objects with this k8s tag. github_token = os.environ[\"GITHUB_TOKEN\"] hash_size", "active remote branches and active k8s tags. # If a k8s tag doesn't", "int(os.environ[\"HASH_SIZE\"]) def get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url, None, {\"token\": github_token}) response", "\"statefulset\", \"pod\"] for k8s_object in k8s_object_list: command_to_delete_k8s_object = ('kubectl delete '+ k8s_object +'", "# This script compares the active remote branches and active k8s tags. 
#", "in k8s_tag_list if k8s_tag ] def delete_k8s_object(label): k8s_object_list = [\"service\", \"ingress\", \"configmap\", \"deployments\",", "os import json from urllib import request # This script compares the active", "get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl get pods -o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list", "] def delete_k8s_object(label): k8s_object_list = [\"service\", \"ingress\", \"configmap\", \"deployments\", \"statefulset\", \"pod\"] for k8s_object", "-o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\") k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-') return [ k8s_tag", "in active_k8s_tag_list if tag != \"\" ] deletable_tags = [ tag for tag", "compares the active remote branches and active k8s tags. # If a k8s", "a k8s tag doesn't match an active hashed remote branches name's, we delete", "in json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size] for branche in active_branches ] def get_active_k8s_tags(): raw_k8s_tag_list", "tag in deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if __name__ == '__main__': for k8s_tag_to_delete in", "def get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl get pods -o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'\", shell=True).decode(\"utf-8\")", "return k8s_tag_list_to_delete if __name__ == '__main__': for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()): delete_k8s_object(k8s_tag_to_delete) print('k8s", "doesn't match an active hashed remote branches name's, we delete all the k8s", "in deletable_tags: k8s_tag_list_to_delete.append(tag) return k8s_tag_list_to_delete if __name__ == '__main__': for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(),", "for k8s_tag_to_delete in 
get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()): delete_k8s_object(k8s_tag_to_delete) print('k8s objects with label branch=cdtn-'+k8s_tag_to_delete+' have been", "[branch.get(\"head\").get(\"ref\").encode() for branch in json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size] for branche in active_branches ]", "github_token}) response = request.urlopen(req) active_branches = [branch.get(\"head\").get(\"ref\").encode() for branch in json.loads(response.read())] return [", "\"configmap\", \"deployments\", \"statefulset\", \"pod\"] for k8s_object in k8s_object_list: command_to_delete_k8s_object = ('kubectl delete '+", "all the k8s objects with this k8s tag. github_token = os.environ[\"GITHUB_TOKEN\"] hash_size =", "json.loads(response.read())] return [ hashlib.sha1(branche).hexdigest()[:hash_size] for branche in active_branches ] def get_active_k8s_tags(): raw_k8s_tag_list =", "[ hashlib.sha1(branche).hexdigest()[:hash_size] for branche in active_branches ] def get_active_k8s_tags(): raw_k8s_tag_list = check_output(\"kubectl get", "get_active_branches(): url = \"https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls\".format(github_token) req = request.Request(url, None, {\"token\": github_token}) response = request.urlopen(req)" ]
[ "(2, 8), (6, 0), (6, 1), (6, 2), (6, 6), (6, 7), (6,", "0), (6, 1), (6, 2), (6, 6), (6, 7), (6, 8), (7, 0),", "CORNER_ZERO = {(1, 1), (1, 7), (7, 1), (7, 7)} result = 1", "in enumerate(mat): for in_line in enumerate(line[1]): if (line[0], in_line[0]) in CORNER_ONE: if in_line[1]", "in range(9)] result_mat = [] for line in mat: result_mat.append([int(val) for val in", "7), (2, 8), (6, 0), (6, 1), (6, 2), (6, 6), (6, 7),", "if in_line[1] == 0: print(0) exit(0) continue if (line[0], in_line[0]) in CORNER_ZERO: if", "8), (8, 0), (8, 1), (8, 2), (8, 6), (8, 7), (8, 8)}", "7), (6, 8), (7, 0), (7, 2), (7, 6), (7, 8), (8, 0),", "(8, 0), (8, 1), (8, 2), (8, 6), (8, 7), (8, 8)} CORNER_ZERO", "CORNER_ZERO: if in_line[1] == 1: print(0) exit(0) continue if in_line[1] == 2: result", "for in_line in enumerate(line[1]): if (line[0], in_line[0]) in CORNER_ONE: if in_line[1] == 0:", "(6, 7), (6, 8), (7, 0), (7, 2), (7, 6), (7, 8), (8,", "{(0, 0), (0, 1), (0, 2), (0, 6), (0, 7), (0, 8), (1,", "7), (8, 8)} CORNER_ZERO = {(1, 1), (1, 7), (7, 1), (7, 7)}", "for line in enumerate(mat): for in_line in enumerate(line[1]): if (line[0], in_line[0]) in CORNER_ONE:", "1), (6, 2), (6, 6), (6, 7), (6, 8), (7, 0), (7, 2),", "CORNER_ONE: if in_line[1] == 0: print(0) exit(0) continue if (line[0], in_line[0]) in CORNER_ZERO:", "2), (0, 6), (0, 7), (0, 8), (1, 0), (1, 2), (1, 6),", "2), (2, 6), (2, 7), (2, 8), (6, 0), (6, 1), (6, 2),", "(1, 0), (1, 2), (1, 6), (1, 8), (2, 0), (2, 1), (2,", "line in enumerate(mat): for in_line in enumerate(line[1]): if (line[0], in_line[0]) in CORNER_ONE: if", "in_line[0]) in CORNER_ONE: if in_line[1] == 0: print(0) exit(0) continue if (line[0], in_line[0])", "(6, 1), (6, 2), (6, 6), (6, 7), (6, 8), (7, 0), (7,", "for i in range(9)] result_mat = [] for line in mat: result_mat.append([int(val) for", "if (line[0], in_line[0]) in CORNER_ZERO: if in_line[1] == 1: print(0) exit(0) continue if", "for val in line]) mat = result_mat for line in enumerate(mat): for in_line", 
"exit(0) continue if (line[0], in_line[0]) in CORNER_ZERO: if in_line[1] == 1: print(0) exit(0)", "0), (1, 2), (1, 6), (1, 8), (2, 0), (2, 1), (2, 2),", "6), (2, 7), (2, 8), (6, 0), (6, 1), (6, 2), (6, 6),", "in enumerate(line[1]): if (line[0], in_line[0]) in CORNER_ONE: if in_line[1] == 0: print(0) exit(0)", "(line[0], in_line[0]) in CORNER_ZERO: if in_line[1] == 1: print(0) exit(0) continue if in_line[1]", "1), (1, 7), (7, 1), (7, 7)} result = 1 mat = [input()", "print(0) exit(0) continue if (line[0], in_line[0]) in CORNER_ZERO: if in_line[1] == 1: print(0)", "2), (6, 6), (6, 7), (6, 8), (7, 0), (7, 2), (7, 6),", "7)} result = 1 mat = [input() for i in range(9)] result_mat =", "(8, 8)} CORNER_ZERO = {(1, 1), (1, 7), (7, 1), (7, 7)} result", "range(9)] result_mat = [] for line in mat: result_mat.append([int(val) for val in line])", "(1, 6), (1, 8), (2, 0), (2, 1), (2, 2), (2, 6), (2,", "(0, 2), (0, 6), (0, 7), (0, 8), (1, 0), (1, 2), (1,", "{(1, 1), (1, 7), (7, 1), (7, 7)} result = 1 mat =", "2), (1, 6), (1, 8), (2, 0), (2, 1), (2, 2), (2, 6),", "result = 1 mat = [input() for i in range(9)] result_mat = []", "(2, 7), (2, 8), (6, 0), (6, 1), (6, 2), (6, 6), (6,", "8)} CORNER_ZERO = {(1, 1), (1, 7), (7, 1), (7, 7)} result =", "in CORNER_ONE: if in_line[1] == 0: print(0) exit(0) continue if (line[0], in_line[0]) in", "(8, 1), (8, 2), (8, 6), (8, 7), (8, 8)} CORNER_ZERO = {(1,", "(7, 0), (7, 2), (7, 6), (7, 8), (8, 0), (8, 1), (8,", "(8, 7), (8, 8)} CORNER_ZERO = {(1, 1), (1, 7), (7, 1), (7,", "6), (0, 7), (0, 8), (1, 0), (1, 2), (1, 6), (1, 8),", "in_line[0]) in CORNER_ZERO: if in_line[1] == 1: print(0) exit(0) continue if in_line[1] ==", "# https://quera.ir/problemset/contest/52544 CORNER_ONE = {(0, 0), (0, 1), (0, 2), (0, 6), (0,", "= 1 mat = [input() for i in range(9)] result_mat = [] for", "mat = result_mat for line in enumerate(mat): for in_line in enumerate(line[1]): if (line[0],", "2), (8, 6), (8, 7), (8, 8)} CORNER_ZERO = {(1, 1), (1, 7),", "[] for line in 
mat: result_mat.append([int(val) for val in line]) mat = result_mat", "(7, 2), (7, 6), (7, 8), (8, 0), (8, 1), (8, 2), (8,", "enumerate(mat): for in_line in enumerate(line[1]): if (line[0], in_line[0]) in CORNER_ONE: if in_line[1] ==", "0), (8, 1), (8, 2), (8, 6), (8, 7), (8, 8)} CORNER_ZERO =", "= result_mat for line in enumerate(mat): for in_line in enumerate(line[1]): if (line[0], in_line[0])", "0), (7, 2), (7, 6), (7, 8), (8, 0), (8, 1), (8, 2),", "mat: result_mat.append([int(val) for val in line]) mat = result_mat for line in enumerate(mat):", "2), (7, 6), (7, 8), (8, 0), (8, 1), (8, 2), (8, 6),", "(0, 8), (1, 0), (1, 2), (1, 6), (1, 8), (2, 0), (2,", "in_line[1] == 0: print(0) exit(0) continue if (line[0], in_line[0]) in CORNER_ZERO: if in_line[1]", "val in line]) mat = result_mat for line in enumerate(mat): for in_line in", "1 mat = [input() for i in range(9)] result_mat = [] for line", "(0, 7), (0, 8), (1, 0), (1, 2), (1, 6), (1, 8), (2,", "mat = [input() for i in range(9)] result_mat = [] for line in", "(6, 8), (7, 0), (7, 2), (7, 6), (7, 8), (8, 0), (8,", "line]) mat = result_mat for line in enumerate(mat): for in_line in enumerate(line[1]): if", "= {(1, 1), (1, 7), (7, 1), (7, 7)} result = 1 mat", "0), (2, 1), (2, 2), (2, 6), (2, 7), (2, 8), (6, 0),", "8), (2, 0), (2, 1), (2, 2), (2, 6), (2, 7), (2, 8),", "(8, 6), (8, 7), (8, 8)} CORNER_ZERO = {(1, 1), (1, 7), (7,", "in CORNER_ZERO: if in_line[1] == 1: print(0) exit(0) continue if in_line[1] == 2:", "= [] for line in mat: result_mat.append([int(val) for val in line]) mat =", "(1, 8), (2, 0), (2, 1), (2, 2), (2, 6), (2, 7), (2,", "6), (6, 7), (6, 8), (7, 0), (7, 2), (7, 6), (7, 8),", "in mat: result_mat.append([int(val) for val in line]) mat = result_mat for line in", "for line in mat: result_mat.append([int(val) for val in line]) mat = result_mat for", "in_line in enumerate(line[1]): if (line[0], in_line[0]) in CORNER_ONE: if in_line[1] == 0: print(0)", "7), (0, 8), (1, 0), (1, 2), (1, 6), (1, 8), (2, 
0),", "(7, 8), (8, 0), (8, 1), (8, 2), (8, 6), (8, 7), (8,", "continue if (line[0], in_line[0]) in CORNER_ZERO: if in_line[1] == 1: print(0) exit(0) continue", "(7, 1), (7, 7)} result = 1 mat = [input() for i in", "1), (8, 2), (8, 6), (8, 7), (8, 8)} CORNER_ZERO = {(1, 1),", "1), (2, 2), (2, 6), (2, 7), (2, 8), (6, 0), (6, 1),", "result_mat for line in enumerate(mat): for in_line in enumerate(line[1]): if (line[0], in_line[0]) in", "line in mat: result_mat.append([int(val) for val in line]) mat = result_mat for line", "in line]) mat = result_mat for line in enumerate(mat): for in_line in enumerate(line[1]):", "(2, 2), (2, 6), (2, 7), (2, 8), (6, 0), (6, 1), (6,", "if in_line[1] == 1: print(0) exit(0) continue if in_line[1] == 2: result *=", "result_mat.append([int(val) for val in line]) mat = result_mat for line in enumerate(mat): for", "(2, 6), (2, 7), (2, 8), (6, 0), (6, 1), (6, 2), (6,", "(2, 1), (2, 2), (2, 6), (2, 7), (2, 8), (6, 0), (6,", "(8, 2), (8, 6), (8, 7), (8, 8)} CORNER_ZERO = {(1, 1), (1,", "(0, 6), (0, 7), (0, 8), (1, 0), (1, 2), (1, 6), (1,", "== 0: print(0) exit(0) continue if (line[0], in_line[0]) in CORNER_ZERO: if in_line[1] ==", "(1, 2), (1, 6), (1, 8), (2, 0), (2, 1), (2, 2), (2,", "6), (8, 7), (8, 8)} CORNER_ZERO = {(1, 1), (1, 7), (7, 1),", "result_mat = [] for line in mat: result_mat.append([int(val) for val in line]) mat", "= {(0, 0), (0, 1), (0, 2), (0, 6), (0, 7), (0, 8),", "(6, 0), (6, 1), (6, 2), (6, 6), (6, 7), (6, 8), (7,", "== 1: print(0) exit(0) continue if in_line[1] == 2: result *= 2 print(result)", "= [input() for i in range(9)] result_mat = [] for line in mat:", "(2, 0), (2, 1), (2, 2), (2, 6), (2, 7), (2, 8), (6,", "0), (0, 1), (0, 2), (0, 6), (0, 7), (0, 8), (1, 0),", "in_line[1] == 1: print(0) exit(0) continue if in_line[1] == 2: result *= 2", "i in range(9)] result_mat = [] for line in mat: result_mat.append([int(val) for val", "8), (7, 0), (7, 2), (7, 6), (7, 8), (8, 0), (8, 1),", "(7, 7)} result = 1 mat = [input() for i 
in range(9)] result_mat", "[input() for i in range(9)] result_mat = [] for line in mat: result_mat.append([int(val)", "https://quera.ir/problemset/contest/52544 CORNER_ONE = {(0, 0), (0, 1), (0, 2), (0, 6), (0, 7),", "enumerate(line[1]): if (line[0], in_line[0]) in CORNER_ONE: if in_line[1] == 0: print(0) exit(0) continue", "(0, 1), (0, 2), (0, 6), (0, 7), (0, 8), (1, 0), (1,", "8), (6, 0), (6, 1), (6, 2), (6, 6), (6, 7), (6, 8),", "8), (1, 0), (1, 2), (1, 6), (1, 8), (2, 0), (2, 1),", "1), (0, 2), (0, 6), (0, 7), (0, 8), (1, 0), (1, 2),", "(6, 6), (6, 7), (6, 8), (7, 0), (7, 2), (7, 6), (7,", "if (line[0], in_line[0]) in CORNER_ONE: if in_line[1] == 0: print(0) exit(0) continue if", "(line[0], in_line[0]) in CORNER_ONE: if in_line[1] == 0: print(0) exit(0) continue if (line[0],", "7), (7, 1), (7, 7)} result = 1 mat = [input() for i", "(6, 2), (6, 6), (6, 7), (6, 8), (7, 0), (7, 2), (7,", "1), (7, 7)} result = 1 mat = [input() for i in range(9)]", "6), (7, 8), (8, 0), (8, 1), (8, 2), (8, 6), (8, 7),", "CORNER_ONE = {(0, 0), (0, 1), (0, 2), (0, 6), (0, 7), (0,", "(1, 7), (7, 1), (7, 7)} result = 1 mat = [input() for", "0: print(0) exit(0) continue if (line[0], in_line[0]) in CORNER_ZERO: if in_line[1] == 1:", "(7, 6), (7, 8), (8, 0), (8, 1), (8, 2), (8, 6), (8,", "6), (1, 8), (2, 0), (2, 1), (2, 2), (2, 6), (2, 7)," ]
[ "setup( name=\"metagraph-stellargraph\", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"] ),", "setuptools import setup, find_packages import versioneer setup( name=\"metagraph-stellargraph\", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for", "description=\"Stellargraph plugins for Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"] ), include_package_data=True, install_requires=[\"metagraph\", \"stellargraph\"], entry_points={", "from setuptools import setup, find_packages import versioneer setup( name=\"metagraph-stellargraph\", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins", "find_packages import versioneer setup( name=\"metagraph-stellargraph\", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for Metagraph\", author=\"<NAME>.\", packages=find_packages(", "import setup, find_packages import versioneer setup( name=\"metagraph-stellargraph\", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for Metagraph\",", "import versioneer setup( name=\"metagraph-stellargraph\", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\",", "versioneer setup( name=\"metagraph-stellargraph\", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"]", "setup, find_packages import 
versioneer setup( name=\"metagraph-stellargraph\", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for Metagraph\", author=\"<NAME>.\",", "cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"] ), include_package_data=True, install_requires=[\"metagraph\", \"stellargraph\"],", "Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"] ), include_package_data=True, install_requires=[\"metagraph\", \"stellargraph\"], entry_points={ \"metagraph.plugins\": \"plugins=metagraph_stellargraph.plugins:find_plugins\" },", "version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"] ), include_package_data=True, install_requires=[\"metagraph\",", "for Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"] ), include_package_data=True, install_requires=[\"metagraph\", \"stellargraph\"], entry_points={ \"metagraph.plugins\": \"plugins=metagraph_stellargraph.plugins:find_plugins\"", "author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"] ), include_package_data=True, install_requires=[\"metagraph\", \"stellargraph\"], entry_points={ \"metagraph.plugins\": \"plugins=metagraph_stellargraph.plugins:find_plugins\" }, )", "plugins for Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"] ), include_package_data=True, install_requires=[\"metagraph\", \"stellargraph\"], entry_points={ \"metagraph.plugins\":", "name=\"metagraph-stellargraph\", 
version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description=\"Stellargraph plugins for Metagraph\", author=\"<NAME>.\", packages=find_packages( include=[\"metagraph_stellargraph\", \"metagraph_stellargraph.*\"] ), include_package_data=True," ]
[ "are doing! *** # Export this package's modules as members: from secret import", "from secret import * from secret_version import * from get_secret import * from", "the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by", "not edit by hand unless you're certain you know what you are doing!", "Do not edit by hand unless you're certain you know what you are", "import * from secret_version import * from get_secret import * from get_secret_version import", "Export this package's modules as members: from secret import * from secret_version import", "*** # Export this package's modules as members: from secret import * from", "Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless", "Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand", "as members: from secret import * from secret_version import * from get_secret import", "file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # ***", "# *** Do not edit by hand unless you're certain you know what", "generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not", "WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***", "modules as members: from secret import * from secret_version import * from get_secret", "Tool. *** # *** Do not edit by hand unless you're certain you", "members: from secret import * from secret_version import * from get_secret import *", "hand unless you're certain you know what you are doing! *** # Export", "coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge", "certain you know what you are doing! 
*** # Export this package's modules", "secret import * from secret_version import * from get_secret import * from get_secret_version", "*** Do not edit by hand unless you're certain you know what you", "# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform", "*** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool.", "this package's modules as members: from secret import * from secret_version import *", "was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do", "# Export this package's modules as members: from secret import * from secret_version", "edit by hand unless you're certain you know what you are doing! ***", "know what you are doing! *** # Export this package's modules as members:", "what you are doing! *** # Export this package's modules as members: from", "*** # *** Do not edit by hand unless you're certain you know", "package's modules as members: from secret import * from secret_version import * from", "by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit", "* from secret_version import * from get_secret import * from get_secret_version import *", "(tfgen) Tool. *** # *** Do not edit by hand unless you're certain", "you know what you are doing! *** # Export this package's modules as", "this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** #", "Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're", "you are doing! *** # Export this package's modules as members: from secret", "<filename>sdk/python/pulumi_aws/secretsmanager/__init__.py # coding=utf-8 # *** WARNING: this file was generated by the Pulumi", "you're certain you know what you are doing! *** # Export this package's", "doing! *** # Export this package's modules as members: from secret import *", "# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen)", "unless you're certain you know what you are doing! 
*** # Export this", "by hand unless you're certain you know what you are doing! *** #" ]
[ "\"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": numpy.array([95, 100,", "Test that valid input cashflows yield correct output payback values for idx, cf", "'measures_demand' Measure objects. measure_master_msegs_out (dict): Master market microsegments that should be generated for", "\"supply\", \"secondary\": \"supply\"}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": [\"general", "{ \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\":", "numpy.array([ -0.01565543, -0.02450490, -0.01934271, -0.01897398, -0.01418052]), \"2010\": numpy.array([ -0.02466428, -0.02853592, -0.02023954, -0.02715319, -0.02355809])},", "numpy.array( [0, 1, 2]), \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": {", "1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "\"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\":", "\"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure2 = { \"name\":", "8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}, \"efficient\": { \"2009\": numpy.array([0, 0,", "i2, places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure attributes are correctly initiated. 
Attributes: sample_measure", "{ \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4345794),", "\"2010\": 5}, \"efficient\": { \"2009\": numpy.array( [0, 1, 2]), \"2010\": numpy.array( [0, 1,", "numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346)]) }, \"commercial\": { \"2009\":", "ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist = [run.Measure( cls.handyvars, **x) for", "19.53341, 20.47302, 15.21750])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136,", "{ \"2009\": numpy.array([16, 27, 31, 6, 51]), \"2010\": numpy.array([106, 95, 81, 11, 124])}},", "\"2009\": 0, \"2010\": numpy.array([8.0, 7.5, 6.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "\"rate 5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}},", "numpy.array([94, 93, 99, 84, 99]), \"2010\": numpy.array([114, 105, 89, 145, 96])}, \"cost savings", "{}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\": {", "(grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2 = { \"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1',", "numpy.array([ 11.11183, 11.34227, 10.05334])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]),", "{\"2009\": 8.022273, \"2010\": 8.022273}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (competed and captured)\": { cls.secnd_adj_key: {", "self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ 
self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2])", "= { \"name\": \"sample compete measure r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"],", "9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.670251, 7.816181,", "20}, \"measure\": { \"2009\": numpy.array([17.77, 10.23, 19.98]), \"2010\": numpy.array([17.77, 10.23, 19.98])}}, \"competed\": {", "\"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "\"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([4.9, 5.3,", "\"b2\": {\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary mseg adjustments\": { \"market share\": { \"original", "cost benefits)\": { \"2009\": numpy.array([ 0.002333333, 0.002333333, -0.04935749, -0.04935749, -0.0802776]), \"2010\": numpy.array([ -0.021500000,", "metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'metrics_update'", "Measure objects and associated contributing microsegment keys that overlap with 'measures_supply_dist' Measure objects.", "is given as a tuple to be of comparable structure # to the", "m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist = [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1),", "test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2 # Create Engine", "numpy.pmt(0.45, 2, 0.5826397), \"rate 4\": numpy.pmt(0.25, 2, 0.72), \"rate 5\": numpy.pmt(0.15, 2, 0.8128544),", "cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = { \"market_entry_year\": None, \"market_exit_year\": None, \"markets\": {", 
"are correctly initiated. Attributes: sample_measure (object): Residential sample measure object. attribute_dict (dict): Dict", "5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]), \"2010\": numpy.array([0.50,", "0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088,", "\"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"F32T8\"], \"secondary\": None}, \"markets\":", "\"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2,", "= { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\": {\"2009\":", "cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on sample", "numpy.array([ 1.941176, 4.555556, 5.647891, 5.501689, 4.543007]), \"2010\": numpy.array([ 4.882353, 7.108108, 6.327488, 10.343948, 8.181351])},", "0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2 = [{ \"savings and portfolio metrics\": { \"Technical potential\":", "2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 5, 2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.5),", "130, \"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160}, \"2010\": { \"rate", "\"payback (w/ energy and carbon costs)\": { \"2009\": numpy.array([ 0.1937984, 0.1879699, 0.1748252, 0.2840909,", "\"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\":", "= [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand", "21, 22])}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, 
\"efficient\": { \"2009\":", "16.04455}, \"efficient\": {\"2009\": 8.022273, \"2010\": 8.022273}}, \"competed\": { \"baseline\": {\"2009\": 8.022273, \"2010\": 8.022273},", "10}, \"efficient\": {\"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30,", "\"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]),", "17, \"2010\": numpy.array([12, 13, 16])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "{\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 1.11, \"2010\": 1.11}}}, \"energy\": { \"total\": {", "should be generated given 'ok_master_mseg_dist3' with a residential sample measure. ok_out_dist4 (dict): Measure", "5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]),", "2.703704, 4.335205, 4.218185, 3.631559]), \"2010\": numpy.array([ 1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])}, \"irr (w/", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07, 2, 0.5245794),", "savings and financial metrics outputs. 
Attributes: handyvars (object): Useful variables across the class.", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\":", "{}, \"adjusted energy (competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key2:", "\"2010\": numpy.array([24, 20, 12])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([18, 15,", "{\"2009\": 16.04, \"2010\": 16.04}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "# Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) # Verify test", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 63.33550, \"2010\": 63.33550}, \"efficient\": {\"2009\": 42.22366, \"2010\":", "5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "{ \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array( [15, 16,", "{}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing", "\"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}}, \"carbon\":", "10.14500])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}},", "\"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": \"supply\"}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\",", "functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure measure_list =", "100, \"rate 7\": 110}, \"2010\": { \"rate 1\": 50, \"rate 2\": 60, \"rate", "1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.overlap_key: { 
\"rate", "array. ok_out_point_res (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that", "cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1 =", "that should be generated for each set of sample cash flows. \"\"\" @classmethod", "\"rate 3\": 105, \"rate 4\": 110, \"rate 5\": 115, \"rate 6\": 120, \"rate", "\"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}}, \"competed\": { \"baseline\":", "2.59768671, \"2010\": 2.59768671}, \"efficient\": {\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": {\"2009\": 1.29884336,", "{ \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975, \"2010\": 20.82975}}, \"competed\": {", "16}, \"efficient\": {\"2009\": 20, \"2010\": 8}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 8},", "'demand', 'windows', 'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to finalize", "-70, \"rate 7\": -75}, \"2010\": { \"rate 1\": -40, \"rate 2\": -50, \"rate", "\"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.overlap_key: {", "numpy.array([ 2.425032, 2.584709, 2.240438, 2.298386, 2.147181])}, \"irr (w/ energy and carbon costs)\": {", "4\": numpy.pmt(0.25, 2, 0.3), \"rate 5\": numpy.pmt(0.15, 2, 0.3695652), \"rate 6\": numpy.pmt(0.065, 2,", "{ \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "\"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}, \"efficient\":", "0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794)]), \"2010\": 
numpy.array([ numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07,", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 23,", "1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([", "cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\":", "\"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001,", "consumer # metrics consumer_metrics_final_dist = [{ \"stock cost\": { \"residential\": { \"2009\": 95,", "{\"2009\": 40, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\":", "{\"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\":", "Set of sample input cash flows. ok_out (list): Outputs that should be generated", "5}, \"measure\": { \"2009\": numpy.array([1.11, 4.89, 0.01]), \"2010\": numpy.array([1.11, 4.89, 0.01])}}}, \"energy\": {", "in cls.handyvars.aeo_years}}, }, \"demand\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": {", "\"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\": numpy.array([16, 15,", "measures w/ point value inputs.\"\"\" # Run measure competition routine on sample measures", "numpy.pmt(0.065, 2, 1.820626), \"rate 7\": -1}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017),", "microsegments for each sample measure # following competition/supply-demand overlap adjustments for ind, d", "properly applies a climate zone/building type/end use partition to a total energy or", "5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\":", "{ \"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20,", "\"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}}, \"competed\": { \"baseline\": { \"2009\": 
numpy.array([ 13.02227, 13.64868,", "1.00, 3.45, 3.45, 4.00]), \"2010\": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])}, \"irr (w/ energy", "19.98]), \"2010\": numpy.array([17.77, 10.23, 19.98])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across the supply", "sample_measure_res (object): Sample residential measure data. sample_measure_com (object): Sample commercial measure data. test_adopt_scheme", "cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))],", "\"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 20, \"2010\": 20}}, \"competed\":", "1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key1:", "= cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist =", "attributes to keys from input dict.\"\"\" for key in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key])", "compete measure c3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None},", "\"2010\": 25}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {", "and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist4'), the focus of this", "-1.146315e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08,", "\"measure\": { \"2009\": numpy.array([22.22, 22.68, 20.11]), \"2010\": numpy.array([22.22, 22.68, 20.11])}}, \"competed\": { \"all\":", 
"{\"2009\": 0, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20},", "-380, \"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400}, \"2010\": { \"rate", "\"2010\": 10}, \"measure\": {\"2009\": 1.73, \"2010\": 1.73}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\":", "numpy.array([ 16.04455, 17.29736, 10.29000])}, \"efficient\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([", "# Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist4'),", "energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure4 = { \"name\": \"sample", "of 'out_break_walk' function. Verify that function properly applies a climate zone/building type/end use", "of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point #", "self.test_htcl_adj) # Run the measure competition routine on sample supply-side measures self.a_run.compete_res_primary( self.measures_supply,", "numpy.array([95, 100, 90])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\":", "supply-side cooling measure 3. 
measures_all (list): List of all competing/interacting sample Measure objects", "{ \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}}, \"competed\": {", "consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test output given residential measure with", "{ cls.adjust_key2: { \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key2: { \"2009\": 100,", "results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"],", "a residential sample measure. ok_out_dist3 (dict): Measure attribute update status, savings, and portfolio/consumer-level", "{\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": numpy.array([11.11, 11.34, 10.05]), \"2010\": numpy.array([11.11, 11.34,", "keys that overlap with 'measures_demand' Measure objects. measure_master_msegs_out (dict): Master market microsegments that", "cls.overlap_key: { \"rate distribution\": { \"2009\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,", "(w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.003046667, -0.01407333, -0.05267604, -0.05230731, -0.07946463]), \"2010\":", "False, \"competed\": True}}, \"consumer metrics\": False}, { \"stock\": { \"cost savings (total)\": {\"2009\":", "generate expected prioritization metric outputs. 
Attributes: handyvars (object): Useful variables across the class.", "13.02227, 13.64868, 10.14500]), \"2010\": numpy.array([ 13.02227, 13.64868, 10.14500])}, \"efficient\": { \"2009\": numpy.array([ 6.511136,", "[\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": [\"electricity (grid)\"]},", "-10}}, \"energy\": { \"savings (total)\": { \"2009\": numpy.array([184, 173, 169, 194, 149]), \"2010\":", "in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5]", "\"measure\": { \"2009\": numpy.array([11.11, 11.34, 10.05]), \"2010\": numpy.array([11.11, 11.34, 10.05])}}}, \"energy\": { \"total\":", "of point values. compete_meas4 (dict): Sample residential supply-side cooling measure 2. compete_meas5 (dict):", "2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\":", "'uncompeted' # market ('ok_master_mseg_dist1'), the focus of this test suite test_meas = run.Measure(self.handyvars,", "[5.14, 0.71, 6.5, 0, 999] def test_cashflow_paybacks(self): \"\"\"Test for correct outputs given valid", "{ \"2009\": 20, \"2010\": numpy.array([10, 12, 14])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\":", "70, \"rate 4\": 80, \"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110}}},", "numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"cost\": {", "\"2010\": 42.22366}}, \"competed\": { \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\":", "{ \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\":", "in cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for yr in 
cls.handyvars.aeo_years}}, }, \"demand\":", "{ \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\", \"cooling\"]}, \"technology\": [\"reflector (LED)\"], \"technology_type\": {", "self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test outcomes given valid sample measures w/", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 3.340502, \"2010\": 3.340502}, \"efficient\": {\"2009\": 2.227001, \"2010\":", "for secondary market microsegment key chain being tested. secnd_adj_key (string): Key used to", "1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "\"2009\": numpy.array([ 63.33550, 64.02682, 60.16002]), \"2010\": numpy.array([ 63.33550, 64.02682, 60.16002])}, \"efficient\": { \"2009\":", "1.670251, \"2010\": 1.670251}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0.5567503,", "\"baseline\": {\"2009\": 69, \"2010\": 66}, \"efficient\": {\"2009\": 46, \"2010\": 44}}, \"competed\": { \"baseline\":", "int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, self.ok_csave, self.ok_ccostsave) # Test that valid inputs yield", "\"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0, 0.001808835, 1.920664]), \"2010\":", "to be tested import run # Import needed packages import unittest import numpy", "10.55592, 10.67114, 10.02667])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "0].update_results, self.ok_out_point_res[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify test", "numpy.array([17.77, 10.23, 19.98]), \"2010\": numpy.array([17.77, 10.23, 19.98])}}, \"competed\": { 
\"all\": {\"2009\": 10, \"2010\":", "0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\":", "24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "30, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10,", "\"rate 7\": -400}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "measure markets attribute for adopt_scheme in self.handyvars.adopt_schemes: for comp_scheme in [\"uncompeted\", \"competed\"]: tested_data", "in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of", "\"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows',", "\"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": { \"Technical potential\":", "\"2010\": numpy.array([24, 20, 12])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}}, \"competed\":", "20, \"2010\": 20}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10,", "60, \"2010\": 60}, \"efficient\": {\"2009\": 40, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30,", "numpy.array([15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10,", "2, 0.8739596), \"rate 4\": numpy.pmt(0.25, 2, 1.08), \"rate 5\": numpy.pmt(0.15, 2, 1.219282), \"rate", "{\"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { 
\"baseline\": {\"2009\": 30, \"2010\": 30},", "to be compared Raises: AssertionError: If dictionaries are not equal. \"\"\" # zip()", "\"2010\": 30}, \"measure\": { \"2009\": numpy.array([22.22, 22.68, 20.11]), \"2010\": numpy.array([22.22, 22.68, 20.11])}}, \"competed\":", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5,", "None, \"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family", "metrics that should be generated given 'ok_master_mseg_point' with a residential sample measure. ok_out_dist1", "5), \"2010\": numpy.repeat(None, 5) }}}, \"irr (w/ energy costs)\": { \"2009\": numpy.array([ 3.648926,", "\"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys and", "{\"2009\": 8.02, \"2010\": 8.02}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455},", "Attributes: sample_measure (dict): Sample residential measure #1. sample_measure2 (dict): Sample residential measure #2.", "{ \"baseline\": { \"2009\": 69, \"2010\": numpy.array([66, 66, 63])}, \"efficient\": { \"2009\": 46,", "\"technology_type\": {\"primary\": \"supply\", \"secondary\": \"supply\"}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"],", "\"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794),", "{\"2009\": 0, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 36},", "family home', 'existing']\": { \"total\": { yr: 10 for yr in cls.handyvars.aeo_years}, \"total", "0}}, \"adjusted energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy", "variables across the class. test_adopt_scheme (string): Sample consumer adoption scheme. 
test_htcl_adj (dict): Sample", "\"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}}, \"lifetime\":", "itertools import os class CommonTestMeasures(object): \"\"\"Class of common sample measures for tests. Attributes:", "-8.611353e-08, -8.611353e-08, -1.247637e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([", "{\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\":", "{ \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": 5}}},", "numpy.array([6, 6.5, 8])}}, \"competed\": { \"baseline\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])},", "{ \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array([0, 1, 2]), \"2010\":", "expected savings and financial metrics outputs. Attributes: handyvars (object): Useful variables across the", "numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 5, 2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1,", "None}, \"commercial\": { \"2009\": { \"rate 1\": -350, \"rate 2\": -60, \"rate 3\":", "16.04}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.02, \"2010\": 8.02}}},", "\"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 50, \"2010\":", "\"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary',", "105, \"rate 4\": 110, \"rate 5\": 115, \"rate 6\": 120, \"rate 7\": 125},", "{ \"total\": { \"baseline\": {\"2009\": 51, \"2010\": 36}, \"efficient\": {\"2009\": 34, \"2010\": 24}},", "30.43499])}, \"efficient\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}},", "\"2009\": numpy.array([ 1.670251, 7.816181, 0.01637724]), \"2010\": numpy.array([ 1.670251, 7.816181, 0.01637724])}}, \"competed\": 
{ \"baseline\":", "\"2009\": 20, \"2010\": numpy.array([8, 9, 9.1])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\":", "# Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist1'),", "\"2010\": 15}, \"measure\": {\"2009\": 11.5, \"2010\": 11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "equal; this should fail if one of the dicts # is empty, is", "21.7, 21.2, 22.5])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\":", "-4.2, -5.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\":", "'measures_all'. measures_overlap1 (dict): List of supply-side Measure objects and associated contributing microsegment keys", "\"2010\": 6.943250}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17.77300, \"2010\": 17.77300},", "{ \"2009\": numpy.array([ 0.1937984, 0.1879699, 0.1748252, 0.2840909, 0.1724138]), \"2010\": numpy.array([ 0.2008032, 0.1901141, 0.2145923,", "\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -150, \"2010\": -150},", "1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\":", "40.94604, 30.43499])}, \"efficient\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736,", "= [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life = 3 cls.ok_product_lifetime = 6.2 cls.ok_life_ratio = 2 cls.ok_base_scost", "\"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 63.33550, \"2010\":", "\"2010\": numpy.array([8.0, 7.5, 6.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\":", "25}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}}", "0.4345794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.2009346), 
numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07,", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 16.04, \"2010\": 16.04}}, \"competed\": { \"all\":", "to adjust. a_run_dist (object): Analysis engine object incorporating all 'measures_primary_dist' objects. measures_overlap (dict):", "str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing')) cls.test_htcl_adj", "}} cls.compete_meas1 = { \"name\": \"sample compete measure r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single", "\"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "8.0])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": {", "energy/carbon and associated cost input values instead of point values. compete_meas2 (dict): Sample", "base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = { \"market_entry_year\": None, \"market_exit_year\":", "100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 0, \"2010\": 50}}},", "0.2009346), numpy.pmt(0.07, 2, 0.2009346)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)", "11.5, \"2010\": 11}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist =", "3.340502, 14.65534, 0.02890102]), \"2010\": numpy.array([ 3.340502, 14.65534, 0.02890102])}, \"efficient\": { \"2009\": numpy.array([ 2.227001,", "should fail if one of the dicts # is empty, is missing section(s),", "5.501689, 4.543007]), \"2010\": numpy.array([ 4.882353, 7.108108, 6.327488, 10.343948, 8.181351])}, \"payback (w/ energy costs)\":", "\"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 2, 4])}}}, \"energy\": { \"total\":", "50}}}, \"carbon\": { \"total\": { 
\"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\": 50,", "dict self.dict_check(i, i2) # At the terminal/leaf node, formatted as a numpy array", "10, \"2010\": 20}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\":", "1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full service\", \"structure_type\":", "24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": {", "\"2010\": 15}, \"efficient\": { \"2009\": 15, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1,", "at the current location in the dict structure, # the keys are equal;", "self.ok_out_point_res[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17.77, \"2010\": 17.77}},", "20, \"2010\": 20}, \"measure\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}}, \"competed\": {", "numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 5, 2.887211)]), \"2010\":", "measure 3. 
compete_meas_dist (dict): Alternative version of sample commercial supply-side lighting measure 1", "{\"2009\": .25, \"2010\": .25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": .30, \"2010\":", "{ \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7,", "\"2010\": 1.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\":", "measures_overlap2 (dict): List of demand-side Measure objects and associated contributing microsegment keys that", "measure # following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind],", "{ \"name\": \"sample compete measure r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\":", "test_cashflow_paybacks(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" # Create an Engine instance", "\"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"],", "{ \"residential\": { \"2009\": numpy.pmt(0.07, 2, 1.808018), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\":", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}}, \"energy\": { \"total\": {", "5.309580, 2.908860, 5.394281]), \"2010\": numpy.array([ 4.601286, 4.897553, 4.260683, 4.367373, 4.089454])}, \"payback (w/ energy", "carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_dist1 = [{ \"savings and portfolio", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07,", "sample measures w/ some array inputs.\"\"\" # Run the measure competition routine on", "11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}}, { \"cce\": { \"2009\": numpy.array([ -0.01306317,", "\"2009\": { \"rate 
1\": numpy.pmt(10.0, 2, -0.4090909), \"rate 2\": numpy.pmt(1.0, 2, 0), \"rate", "\"rate 6\": 150, \"rate 7\": 160}, \"2010\": { \"rate 1\": 100, \"rate 2\":", "\"competed\": { \"baseline\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}, \"efficient\": { \"2009\":", "1, -0.185), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\": {", "8.5, \"2010\": 6}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "current keys are equal self.assertCountEqual(i, i2) # Continue to recursively traverse the dict", "used to link primary and secondary market microsegments (by climate, building type, structure", "\"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\":", "type/end use partition to a total energy or carbon market/savings value. Attributes: a_run", "initiated. Attributes: sample_measure (object): Residential sample measure object. 
attribute_dict (dict): Dict of sample", "5.501689, 4.082098]), \"2010\": numpy.array([ 8.446248, 11.795815, 6.327488, 10.343948, 7.801544])}, \"payback (w/ energy costs)\":", "{\"2009\": 15, \"2010\": 15}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60},", "\"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array( [15,", "0.004522088, 2.400830388])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114,", "30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array( [25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\":", "{ \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array( [20, 21, 22]), \"2010\":", "numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"cost\": { \"stock\": {", "15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"cost\": {", "captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure2 =", "\"rate 7\": 160}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "output for the test run of the 'metric_update' # function function_output = engine_instance.metric_update(", "numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 2, 0.3845794)]), \"2010\":", "}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array( [15.1,", "\"2010\": 17.77}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.89, \"2010\":", "numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08, -8.216772e-08, -7.592937e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\":", "\"Heating\": {\"2009\": 10, \"2010\": 10}, \"Cooling\": {\"2009\": 15, \"2010\": 15}}, \"Commercial\": { \"Heating\":", "\"competed\": { \"baseline\": {\"2009\": 45, \"2010\": 
45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"cost\":", "10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {", "self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'metrics_update' function.", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure3", "6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}}, \"irr (w/ energy costs)\": { \"2009\":", "1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, str(('primary', 'AIA_CZ2', 'single family home',", "\"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\":", "{ \"rate 1\": -190, \"rate 2\": -195, \"rate 3\": -190, \"rate 4\": -205,", "\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 50, \"rate 2\":", "\"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}, \"efficient\": { \"2009\":", "1.798978), numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07, 2, 1.582016)])", "and assign it a sample 'uncompeted' # market ('ok_master_mseg_point'), the focus of this", "across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset", "10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "\"rate 6\": -160, \"rate 7\": -370}}}, \"carbon cost\": { \"residential\": { \"2009\": None,", "cost benefits)\": { \"2009\": numpy.array([ -0.0396936, -0.04452961, -0.05150073, -0.006204243, -0.09331291]), \"2010\": numpy.array([ -0.1140346,", "family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\": { \"total\": { \"all\":", "\"2010\": 22}}, \"competed\": { \"all\": 
{\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.5, \"2010\":", "{ \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6,", "adjust_key1 (string): First sample string for competed demand-side and supply-side market microsegment key", "8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}, \"cost savings (annual)\": { \"2009\":", "\"baseline\": {\"2009\": 0, \"2010\": 12}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"carbon\": { \"total\":", "\"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"general service (CFL)\"], \"secondary\":", "measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[", "captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\":", "5\": numpy.pmt(0.15, 2, 0.3695652), \"rate 6\": numpy.pmt(0.065, 2, 0.4389671), \"rate 7\": -0.25}, \"2010\":", "0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]),", "ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,' and 'htcl_adj'. 
Verify that 'compete_res_primary' correctly calculates primary market", "\"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "{ \"2009\": 120, \"2010\": 120}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\":", "{ \"Residential\": { \"Heating\": {\"2009\": .10, \"2010\": .10}, \"Cooling\": {\"2009\": .15, \"2010\": .15}},", "\"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([", "\"measure\": {\"2009\": 5, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\":", "competed primary market microsegment key chain being tested. overlap_key_scnd (string): Second sample string", "\"2010\": 11}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "10, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\":", "1.73179114, \"2010\": 1.73179114}, \"efficient\": {\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571,", "needed to finalize point value test measure # consumer metrics consumer_metrics_final = [{", "\"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6,", "41.9, 50.0, 48.9]), \"2010\": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])}, \"cost savings (total)\": {", "{ \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist2 = { \"stock\": {", "2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 5, 4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346),", "value test measure consumer metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind]", "is the key and the second item is the value; # in the", "numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}}, \"cost\": { \"stock\": { \"total\": 
{ \"baseline\":", "-155, \"rate 6\": -160, \"rate 7\": -170}, \"2010\": { \"rate 1\": -135, \"rate", "1.08), \"rate 5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\":", "(total)\": {\"2009\": -5, \"2010\": -10}, \"cost savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\":", "\"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15},", "\"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.compete_meas5 = { \"name\": \"sample", "m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": {", "\"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 0.865895571, \"2010\":", "\"2010\": 22.22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.11, \"2010\":", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07,", ".10}, \"Cooling\": {\"2009\": .15, \"2010\": .15}}, \"Commercial\": { \"Heating\": {\"2009\": .20, \"2010\": .20},", "information needed to finalize point value test measure # consumer metrics consumer_metrics_final =", "15, \"2010\": 25}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\":", "-140, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160,", "11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "test measure consumer metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist", "4.089454])}, \"payback (w/ energy costs)\": { \"2009\": numpy.array([ 0.2392344, 0.2347418, 0.2242152, 0.2659574, 0.2857143]),", "\"mseg_out_break\": {}}}} cls.compete_meas1_dist = { \"name\": 
\"sample compete measure r1 dist\", \"climate_zone\": [\"AIA_CZ1\"],", "\"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0,", "and supply-side market microsegment key chain being tested. compete_meas1 (dict): Sample residential demand-side", "{ \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20,", "{ \"2009\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4], \"2010\": [ 0.1,", "self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def", "= 0 cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.test_adopt_scheme = \"Max adoption potential\" cls.overlap_key =", "competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class", "{ \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([2.23, 9.77,", "{\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4 = {", "consumer # metrics consumer_metrics_dist = [{ \"stock cost\": { \"residential\": { \"2009\": None,", "a climate zone/building type/end use partition to a total energy or carbon market/savings", "0.3194888, 0.3533569, 0.3472222, 0.3636364])}, \"payback (w/ energy and carbon costs)\": { \"2009\": numpy.array([", "'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset aeo_years cls.handyvars.aeo_years = [\"2009\",", "2.44}, \"irr (w/ energy and carbon costs)\": { \"2009\": 4.54, \"2010\": 4.09}, \"payback", 
"\"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 15, \"2010\": 15}}}, \"energy\":", "\"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4 = { \"name\": \"sample compete measure", "\"Cooling\": {\"2009\": 45, \"2010\": 45}}}} def test_ok(self): \"\"\"Test for correct function output given", "numpy.pmt(0.07, 2, 0.4345794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2,", "ok_out_dicts (list): Output annuity equivalent Net Present Value dicts that should be generated", "\"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\":", "numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\":", "5\": -180, \"rate 6\": -230, \"rate 7\": -200}, \"2010\": { \"rate 1\": -190,", "25.5, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5,", "carbon cost benefits)\": { \"2009\": numpy.array([ -0.04898876, -0.05783823, -0.05267604, -0.05230731, -0.04751385]), \"2010\": numpy.array([", "0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 5, 2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07,", "tuple is the key and the second item is the value; # in", "keys that overlap with 'measures_demand_dist' Measure objects. 
measures_overlap2_dist (dict): List of demand-side Measure", "respectively, at the current level of the recursive # exploration of dict1 and", "that 'compete_com_primary' correctly calculates primary market shares and updates master microsegments for a", "4.367373, 4.089454])}, \"payback (w/ energy costs)\": { \"2009\": numpy.array([ 0.2392344, 0.2347418, 0.2242152, 0.2659574,", "\"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array( [25.1, 24.7, 23.7, 31.2,", "1.820626), \"rate 7\": -1}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\":", "2.908860, 5.394281]), \"2010\": numpy.array([ 4.601286, 4.897553, 4.260683, 4.367373, 4.089454])}, \"payback (w/ energy costs)\":", "= os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years = [\"2009\", \"2010\"]", "{\"2009\": numpy.array([ 4.442382, 8.824726, 5.647891, 5.501689, 4.082098]), \"2010\": numpy.array([ 8.446248, 11.795815, 6.327488, 10.343948,", "5\": -180, \"rate 6\": -230, \"rate 7\": -200}}}, \"carbon cost\": { \"residential\": {", "{ \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"carbon\": { \"total\": { \"baseline\": {", "supply-side market microsegment key chain being tested. 
adjust_key2 (string): Second sample string for", "21.3, 18.3, 18.8, 17.5])}}}, { \"cce\": { \"2009\": numpy.array([ -0.01306317, -0.01389378, -0.01422262, -0.01238981,", "200}, \"savings (annual)\": {\"2009\": 100, \"2010\": 100}, \"cost savings (total)\": {\"2009\": 10, \"2010\":", "1.113501}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "routine on sample supply-side measures self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2, self.test_adopt_scheme) # Remove any market", "'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing')) cls.adjust_key2 = str(", "0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}, \"Max adoption potential\":", "cls.compete_meas1_dist = { \"name\": \"sample compete measure r1 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_res(self): \"\"\"Test outcomes given valid sample", "{ \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -40, \"rate", "\"2010\": 20}, \"measure\": {\"2009\": 16.04, \"2010\": 16.04}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\":", "[-10, 0, 1, 2], [10, 4, 7, 8, 10], [-100, 0, 1]] cls.ok_out", "{\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}}", "2.227001, 10.25874, 0.02119408])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.670251, 7.32767, 0.01445051]), \"2010\":", "{ \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete", "microsegments (by climate, building type, structure type). 
compete_meas1 (dict): Sample commercial supply-side lighting", "metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test output given commercial measure with point", "enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\":", "\"2010\": numpy.array([0.87, 0.01, 4.80])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114,", "family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"supply\", \"secondary\":", "[run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist =", "10.29])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.02, 8.65,", "\"2010\": numpy.array([ 5.345834, 7.580577, 3.931585, 6.612039, 4.915578])}, \"irr (w/ energy and carbon costs)\":", "1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key2: { \"stock\": {", "the class. test_adopt_scheme (string): Sample consumer adoption scheme. overlap_key (string): First sample string", "4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -170}, \"2010\": {", "numpy.array([ -0.1140346, -0.11474490, -0.09371098, -0.072742925, -0.11206083])}, \"ccc\": { \"2009\": numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08,", "Measure objects. 
measure_master_msegs_out (dict): Master market microsegments that should be generated for each", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\": 50, \"2010\":", "-0.05783823, -0.05267604, -0.05230731, -0.04751385]), \"2010\": numpy.array([ -0.09966428, -0.10353592, -0.09523954, -0.10215319, -0.09855809])}, \"ccc\": {", "this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4 # Create", "6\": 150, \"rate 7\": 160}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\":", "demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) # Check", "21.11183, 21.34227, 20.05334])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592,", "measure lifetime array. ok_master_mseg_dist4 (dict): Sample measure master microsegment including stock cost and", "for (k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): # Confirm that at", "heating and cooling supply-demand overlaps. 
Attributes: handyvars (object): Useful variables across the class.", "\"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}, \"competed\": { \"baseline\":", "and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(cls.handyvars, **x) for x in [", "\"2009\": 34.5, \"2010\": numpy.array([33, 33, 31.5])}}, \"competed\": { \"baseline\": { \"2009\": 23, \"2010\":", "44, 42])}}, \"competed\": { \"baseline\": { \"2009\": 34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\":", "\"stock\": { \"cost savings (total)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\":", "for running the engine \"\"\" # Import code to be tested import run", "case, verify correct adoption/competition scenario # keys for measure markets/savings/portfolio metrics for adopt_scheme", "\"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 5}}}, \"carbon\":", "205, \"rate 2\": 100, \"rate 3\": 105, \"rate 4\": 110, \"rate 5\": 115,", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 41.65950, 30.34466, 44.97110]), \"2010\": numpy.array([ 41.65950, 30.34466,", "-105, \"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120}}}}] # Adjust/finalize point", "\"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} class CommonMethods(object): \"\"\"Define common", "the class. measure_list (list): List for Engine including one sample residential measure. 
ok_cashflows", "-1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc", "\"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\":", "\"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}}, \"competed\": { \"baseline\":", "1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.overlap_key: { \"rate", "-400}, \"2010\": { \"rate 1\": -350, \"rate 2\": -60, \"rate 3\": -70, \"rate", "20.10668]), \"2010\": numpy.array([ 22.22366, 22.68455, 20.10668])}, \"efficient\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]),", "cls.measures_all = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]]", "Useful variables across the class. test_adopt_scheme (string): Sample consumer adoption scheme. test_htcl_adj (dict):", "1 including lists stock cost input values instead of point values. 
measures_all (list):", "Adjust/finalize point value test measure consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv']", "-110, \"rate 6\": -115, \"rate 7\": -120}, \"2010\": { \"rate 1\": -90, \"rate", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([36, 30, 18])}, \"efficient\":", "numpy.array([ 0.865895571, 0.01085301, 6.722325])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]),", "30.08001])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}},", "market ('ok_master_mseg_dist3'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][", "self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) #", "# Run the measure competition routine on sample supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2,", "primary market microsegment key chain being tested. 
overlap_key_scnd (string): Second sample string for", "{\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"energy\":", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}}, \"energy\": {", "# If the recursion has not yet reached the terminal/leaf node if isinstance(i,", "2, 0.9040091)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014),", "6\": 110, \"rate 7\": 115}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\":", "\"baseline\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}, \"efficient\":", "\"2010\": numpy.array([95, 100, 90])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": {", "{ \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}, \"competed\": {", "15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15},", "5.14])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\":", "\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\":", "\"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0,", "0, \"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}, \"Max", "\"Commercial\": { \"Heating\": {\"2009\": 40, \"2010\": 40}, \"Cooling\": {\"2009\": 45, \"2010\": 45}}}} def", "\"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4 = {", "2, 1.808018), numpy.pmt(0.07, 5, 4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346),", "1.11, \"2010\": 1.11}}}, \"energy\": { \"total\": { 
\"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\":", "10}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([8.0, 7.5, 6.5])}}}, \"energy\": { \"total\": {", "should be generated given 'ok_master_mseg_point' with a residential sample measure. ok_out_point_com (dict): Measure", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) #", "5, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\":", "{ \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}}, \"competed\": {", "\"2009\": numpy.array([0.87, 0.01, 4.80]), \"2010\": numpy.array([0.87, 0.01, 4.80])}}}, \"energy\": { \"total\": { \"baseline\":", "\"rate 6\": numpy.pmt(0.065, 2, 0.2042254), \"rate 7\": -0.125}}}, \"energy cost\": { \"residential\": {\"2009\":", "\"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "\"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\":", "0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\":", "\"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 1.941176, 4.555556, 5.647891, 5.501689, 4.543007]),", "self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def", "{ \"rate 1\": -435, \"rate 2\": -440, \"rate 3\": -145, \"rate 4\": -150,", "{ \"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 
numpy.array([95,", "15, \"2010\": 15}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "{ \"baseline\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])},", "{\"2009\": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]), \"2010\": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])}, \"irr", "'ASHP', 'existing')) cls.test_htcl_adj = { \"supply\": { \"['AIA_CZ1', 'single family home', 'existing']\": {", "33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "each sample measure # following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run.measures):", "\"2010\": 1.670251}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0.5567503, \"2010\":", "2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2, 0.5625), \"rate 3\": numpy.pmt(0.45, 2, 0.8739596), \"rate", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07,", "compete measure c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None},", "{ \"2009\": numpy.array([8.89, 5.11, 9.99]), \"2010\": numpy.array([8.89, 5.11, 9.99])}}}, \"energy\": { \"total\": {", "{\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 0, \"2010\": 50}}}, \"carbon\": { \"total\": {", "self.ok_out_point_com[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test", "\"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\":", "value inputs.\"\"\" # Initialize test measure and assign it a sample 'uncompeted' #", "\"2009\": numpy.array( [15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\": 
numpy.array([20.1, 18.7, 21.7, 19.2, 20.5])", "-4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}}, { \"anpv\":", "\"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\":", "27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}, \"efficient\": { \"2009\": numpy.array([ 20.82975,", "8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}, \"carbon\": {", "\"carbon cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.9040091), \"2010\": numpy.pmt(0.07, 2, 1.356014)},", "10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6,", "CZ1\": { \"Residential\": { \"Heating\": {\"2009\": 10, \"2010\": 10}, \"Cooling\": {\"2009\": 15, \"2010\":", "sample_measure (dict): Sample residential measure #1. sample_measure2 (dict): Sample residential measure #2. sample_measure3", "\"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist1 =", "22.68455, 20.10668])}, \"efficient\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227,", "5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "{\"2009\": 50, \"2010\": 50}, \"cost savings (total)\": {\"2009\": 5, \"2010\": 15}, \"cost savings", "of 'measures_all' with secondary microsegments to adjust. 
a_run (object): Analysis engine object incorporating", "5\": -155, \"rate 6\": -160, \"rate 7\": -170}}}}, { \"stock cost\": { \"residential\":", "[20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\":", "suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4 # Create Engine instance", "0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": {", "sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Record the output for the test", "suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2 # Create Engine instance", "\"baseline\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}, \"efficient\": { \"2009\": 25.5, \"2010\":", "{ \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\": { \"2009\":", "values\": { cls.overlap_key: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10},", "\"rate 7\": -0.5}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0,", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 8}}}, \"energy\":", "{ \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])},", "-1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc (w/ energy cost", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4090909), \"rate", "cls.measures_all[2:5] cls.measures_overlap1 = { \"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity", "ok_out (list): 
Outputs that should be generated for each set of sample cash", "\"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 90, \"2010\":", "\"2010\": 15}, \"measure\": { \"2009\": numpy.array([11.11, 11.34, 10.05]), \"2010\": numpy.array([11.11, 11.34, 10.05])}}}, \"energy\":", "{ \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0,", "measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments for", "choice parameters\": { cls.overlap_key: { \"rate distribution\": { \"2009\": [ 0.1, 0.1, 0.1,", "numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226,", "savings (annual)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3,", "15, 13])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 0,", "str( ('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'lighting', 'reflector (LED)', 'existing')) cls.overlap_key_scnd = str(", "{ \"baseline\": {\"2009\": 8.5, \"2010\": 6}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "be generated for each Measure object in 'measures_all' following competition and supply-demand overlap", "the operation of the 'convert_to_numpy' function. 
Verify that the function converts terminal/leaf node", "1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "\"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2,", "numpy.array([ numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018),", "5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20,", "24}, \"efficient\": {\"2009\": 0, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 12},", "\"\"\"Test the operation of the 'payback' function. Verify cashflow input generates expected payback", "measure 1. compete_meas1_dist (dict): Alternative version of sample residential demand-side cooling measure 1", "-0.01565543, -0.02450490, -0.01934271, -0.01897398, -0.01418052]), \"2010\": numpy.array([ -0.02466428, -0.02853592, -0.02023954, -0.02715319, -0.02355809])}, \"cce", "-1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc (w/ energy", "numpy.array([ 1.670251, 7.32767, 0.01445051])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([", "6\": numpy.pmt(0.065, 2, 0.2042254), \"rate 7\": -0.125}}}, \"energy cost\": { \"residential\": {\"2009\": None,", "\"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 4.442382, 8.824726, 5.647891, 5.501689, 4.082098]),", "test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify test measure portfolio-level financial metrics", "-8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\":", "30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"cost\": { 
\"stock\": { \"total\": { \"baseline\":", "5.144998])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273,", "measure. ok_num_units (int): Sample number of competed units. ok_base_life (int): Sample baseline technology", "17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6,", "{ \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 45, \"2010\": 45}}, \"competed\": {", "\"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 41.65950, \"2010\": 41.65950}, \"efficient\": {\"2009\": 27.77300, \"2010\":", "{\"2009\": 11.11, \"2010\": 11.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366},", "and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3_dist = { \"name\": \"sample compete measure", "(w/ energy costs)\": {\"2009\": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]), \"2010\": numpy.array([0.50, 0.50, 2.44,", "function. Verify cashflow input generates expected payback output. Attributes: handyvars (object): Useful variables", "\"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "including stock cost array. 
ok_master_mseg_dist3 (dict): Sample measure master microsegment including measure lifetime", "str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing')) cls.adjust_key2", "\"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'multi family home', 'electricity (grid)', 'lighting', 'reflector (LED)')):", "'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\":", "(LED)', 'existing')) cls.overlap_key_scnd = str( ('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'cooling', 'demand', 'lighting", "{ cls.adjust_key1: { \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key1: { \"2009\": 100,", "None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\":", "0.07438017), \"rate 2\": numpy.pmt(1.0, 2, 0.5625), \"rate 3\": numpy.pmt(0.45, 2, 0.8739596), \"rate 4\":", "cls.ok_out_array = [ numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654), None,", "8.022273, 8.648681, 5.144998])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}},", "residential sample measure. ok_out_point_com (dict): Measure attribute update status, savings, and portfolio/consumer-level financial", "given 'ok_master_mseg_point' with a residential sample measure. ok_out_point_com (dict): Measure attribute update status,", "{\"2009\": 10, \"2010\": 20}, \"measure\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"all\": {\"2009\":", "avoided carbon costs. 
ok_out_dicts (list): Output annuity equivalent Net Present Value dicts that", "numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1},", "{}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\": {},", "{ \"total\": { \"baseline\": {\"2009\": 39.06682, \"2010\": 39.06682}, \"efficient\": {\"2009\": 26.04455, \"2010\": 26.04455}},", "lighting measure 1. compete_meas2 (dict): Sample commercial supply-side lighting measure 2. compete_meas3 (dict):", "compete_meas1_dist (dict): Alternative version of sample residential demand-side cooling measure 1 including lists", "'reflector (LED)', 'existing')) cls.overlap_key_scnd = str( ('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'cooling', 'demand',", "0.5159346), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\": { \"2009\":", "numpy.array([11.11, 11.34, 10.05])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455,", "0.003046667, -0.01407333, -0.05267604, -0.05230731, -0.07946463]), \"2010\": numpy.array([ -0.047715000, -0.05520500, -0.09523954, -0.10215319, -0.13025120])}, \"ccc\":", "\"\"\"Test outcomes given sample measures w/ point value inputs.\"\"\" # Run measure competition", "\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]),", "0.20, 0.20]), \"2010\": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}] cls.ok_out_dist4 = [{ \"savings and", "{ \"2009\": -50, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock", "\"2010\": 15}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30},", "ok_partitions (dict): Sample results partitioning fraction. 
ok_out (dict): Sample partitioned measure results data.", "\"2010\": 12}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07,", "numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1,", "{\"2009\": 10, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 20,", "8.181351])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]), \"2010\":", "6, 7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10},", "0.2, \"2010\": 0.22}}] cls.ok_out_dist1 = [{ \"savings and portfolio metrics\": { \"Technical potential\":", "run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize array test measure consumer #", "numpy.array([ 1.73179114, 0.01808835, 9.60332155])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]),", "9.77, 0.02]), \"2010\": numpy.array([2.23, 9.77, 0.02])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5},", "'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run", "42.22366, \"2010\": 42.22366}}, \"competed\": { \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592,", "{ \"name\": \"sample compete measure r2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\":", "\"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"cost\":", "\"\"\" # zip() and zip_longest() produce tuples for the items # identified, where", "20.22977, 29.98073])}, \"efficient\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 
15.17233,", "{\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "{ \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 16}, \"efficient\": {\"2009\": 20, \"2010\": 8}},", "{ \"2009\": numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08,", "100, \"2010\": 150}, \"efficient\": {\"2009\": 0, \"2010\": 50}}}, \"carbon\": { \"total\": { \"baseline\":", "\"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 0,", "copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5]", "\"2009\": numpy.pmt(0.07, 2, 1.808018), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}},", "cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\":", "Sample consumer adoption scheme. overlap_key (string): First sample string for competed primary market", "ok_total (dict): Sample unpartitioned measure results data. 
ok_partitions (dict): Sample results partitioning fraction.", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}, \"competed\": {", "\"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}, \"efficient\": { \"2009\":", "\"baseline\": { \"2009\": numpy.array([ 2.59768671, 0.02713253, 14.40498233]), \"2010\": numpy.array([ 2.59768671, 0.02713253, 14.40498233])}, \"efficient\":", "update status, savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist1'", "numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}, \"cost savings", "\"2010\": numpy.array([ 8.446248, 11.795815, 6.327488, 10.343948, 7.801544])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.compete_meas5 =", "sample 'uncompeted' # market ('ok_master_mseg_dist4'), the focus of this test suite test_meas =", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233,", "{\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist2 = { \"stock\": { \"total\": {", "test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1 # Create Engine instance using", "15, \"2010\": 15}, \"measure\": {\"2009\": 11.5, \"2010\": 11}}}, \"energy\": { \"total\": { \"baseline\":", "\"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.5, \"2010\": 11}}}, \"energy\": { \"total\":", "0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46,", "CommonMethods): \"\"\"Test the operation of the 'convert_to_numpy' function. 
Verify that the function converts", "13.64868, 10.14500])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341,", "cost delta. ok_esave (int): Sample measure energy savings. ok_ecostsave (int): Sample measure energy", "def test_compete_res(self): \"\"\"Test outcomes given valid sample measures w/ point value inputs.\"\"\" #", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}},", "10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 2, 4])}}}, \"energy\": {", "\"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2,", "cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\":", "residential supply-side cooling measure 3. measures_all (list): List of all competing/interacting sample Measure", "30.08001])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 21.11183, 21.34227, 20.05334]), \"2010\": numpy.array([ 21.11183,", "test cases) elif isinstance(i, numpy.ndarray): self.assertTrue(type(i) == type(i2)) for x in range(0, len(i)):", "{\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\":", "numpy.array([95, 100, 90]), \"2010\": numpy.array([95, 100, 90])}, \"commercial\": { \"2009\": None, \"2010\": None}},", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 39.06682, 40.94604, 30.43499]), \"2010\": numpy.array([", "1.73}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0.87, \"2010\": 0.87}}},", "\"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist3 =", "overlap adjustments. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class", "1}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\": { \"2009\": [ 0.1,", "self.ok_out_dist4[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify test measure", "update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1])", "\"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 40, \"2010\": 30}}, \"competed\": { \"baseline\":", "{\"primary\": \"supply\", \"secondary\": \"supply\"}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\":", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([16,", "\"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "6}}, \"competed\": { \"baseline\": {\"2009\": 8.5, \"2010\": 6}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "2\": -195, \"rate 3\": -190, \"rate 4\": -205, \"rate 5\": -180, \"rate 6\":", "\"rate 1\": numpy.pmt(10.0, 2, 0.09917355), \"rate 2\": numpy.pmt(1.0, 2, 0.75), \"rate 3\": numpy.pmt(0.45,", "numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07,", "{\"2009\": 10.55592, \"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist", "\"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array( [5,", "= str( ('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'cooling', 'demand', 'lighting gain', 'existing')) cls.secnd_adj_key", 
"{\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"F32T8\"], \"secondary\":", "numpy.array([ 0.1133333, 0.08222222, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3 = [{ \"savings and portfolio metrics\":", "numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45,", "numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}}, { \"cce\":", "-8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([ -2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}}, { \"anpv\": { \"stock", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 20,", "\"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max adoption potential' cls.ok_rate", "measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments for", "\"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"cost\": { \"stock\":", "measure competition routine on sample demand-side measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme) # Remove", "5\": 140, \"rate 6\": 150, \"rate 7\": 160}}}, \"energy cost\": { \"residential\": {", "numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])},", "a residential sample measure. 
ok_out_dist4 (dict): Measure attribute update status, savings, and portfolio/consumer-level", "numpy.pmt(1.0, 2, 0.375), \"rate 3\": numpy.pmt(0.45, 2, 0.5826397), \"rate 4\": numpy.pmt(0.25, 2, 0.72),", "{ \"2009\": { \"rate 1\": 100, \"rate 2\": 110, \"rate 3\": 120, \"rate", "savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.03566667, 0.03566667,", "cls.ok_out = { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": 10, \"2010\": 10},", "21.7, 19.2, 20.5]) }}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {", "5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([0.87, 0.01, 4.80]), \"2010\": numpy.array([0.87, 0.01, 4.80])}}},", "\"baseline\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}, \"efficient\": { \"2009\": 0, \"2010\":", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 20, \"2010\": 20}}, \"competed\":", "{ \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 15, \"2010\": 5}}}}, \"lifetime\": {", "\"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None,", "4\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full", "\"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([6, 7, 1, 16, 1]),", "market ('ok_master_mseg_point'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][", "for each Measure object in 'measures_all_dist' following competition and supply-demand overlap adjustments. 
\"\"\"", "numpy.array([ 0, 0.001808835, 1.920664]), \"2010\": numpy.array([ 0, 0.001808835, 1.920664])}}}, \"energy\": { \"total\": {", "\"2009\": 17, \"2010\": numpy.array([12, 13, 16])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10},", "{\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "{ \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"cost\": {", "\"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}}, { \"cce\": { \"2009\": numpy.array([ -0.01306317, -0.01389378,", "{ \"2009\": numpy.array([1.11, 4.89, 0.01]), \"2010\": numpy.array([1.11, 4.89, 0.01])}}}, \"energy\": { \"total\": {", "r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\":", "[100.6, 108.7, 105.1, 105, 106.1])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\":", "0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 5, 2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07,", "# respectively, at the current level of the recursive # exploration of dict1", "\"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2, 0.5625), \"rate", "\"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}, \"efficient\": { \"2009\":", "= run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase,", "{\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "object 
incorporating all 'measures_primary' objects. measures_all_dist (list): List of competing measures including some", "2, 4])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\":", "climate zone/building type/end use partition to a total energy or carbon market/savings value.", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 85, \"rate 2\": 90, \"rate", "(dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that should be", "numpy.array([15, 16, 17]), \"2010\": numpy.array([15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\":", "\"rate 4\": -380, \"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400}}}, \"carbon", "all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure4", "9.99])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\":", "6.943250, \"2010\": 6.943250}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 41.65950, \"2010\": 41.65950}, \"efficient\":", "[\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\", \"cooling\"]}, \"technology\": [\"reflector (LED)\"],", "{\"2009\": 20, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\":", "\"2009\": numpy.array([ 22.22366, 22.68455, 20.10668]), \"2010\": numpy.array([ 22.22366, 22.68455, 20.10668])}, \"efficient\": { \"2009\":", "{ \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"carbon\": {", "'measures_all_dist' following competition and supply-demand overlap adjustments. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables", "5 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\":", "{ \"baseline\": { \"2009\": numpy.array([ 13.02227, 13.64868, 10.14500]), \"2010\": numpy.array([ 13.02227, 13.64868, 10.14500])},", "9.990366])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": {", "If dictionaries are not equal. \"\"\" # zip() and zip_longest() produce tuples for", "compete measure r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\":", "carbon market/savings value. Attributes: a_run (object): Sample analysis engine object. ok_total (dict): Sample", "{ \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": {", "input values instead of point values. 
measures_all (list): List of all competing measures", "44, 42])}, \"efficient\": { \"2009\": 34.5, \"2010\": numpy.array([33, 33, 31.5])}}, \"competed\": { \"baseline\":", "{ \"2009\": numpy.array([ -0.01306317, -0.01389378, -0.01422262, -0.01238981, -0.01613170]), \"2010\": numpy.array([ -0.01145724, -0.01084246, -0.01014934,", "\"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "self.measures_all, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure self.a_run.secondary_adj( self.measures_secondary,", "{ \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}},", "\"2010\": 20}, \"measure\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\":", "15}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg", "\"2010\": numpy.array([ 2.227001, 10.25874, 0.02119408])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.670251, 7.32767,", "# following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][", "Verify cashflow input generates expected payback output. 
Attributes: handyvars (object): Useful variables across", "20.22977, 29.98073])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([", "\"Cooling\": {\"2009\": .15, \"2010\": .15}}, \"Commercial\": { \"Heating\": {\"2009\": .20, \"2010\": .20}, \"Cooling\":", "# Test for correct data types in measure markets attribute for adopt_scheme in", "self.sample_measure5 = { \"name\": \"sample measure 5 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\":", "[-10, 14, 2, 3, 4], [-10, 0, 1, 2], [10, 4, 7, 8,", "self.ok_savings_mkts_comp_schemes) # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) # Verify", "[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist =", "test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify test measure portfolio-level financial metrics", "test measure consumer # metrics consumer_metrics = [{ \"stock cost\": { \"residential\": {", "\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "{\"2009\": 1.11, \"2010\": 1.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001},", "10.55592}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 63.33550, \"2010\": 63.33550}, \"efficient\": {\"2009\": 42.22366,", "10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "\"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([10,", "\"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}}, \"lifetime\":", "5, 2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), 
numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2, 1.356014),", "\"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\":", "10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_res(self): \"\"\"Test", "22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.88650,", "1.36547), \"rate 7\": -0.75}}}, \"carbon cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\":", "test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify test measure consumer-level", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.measures_all =", "parameters\": { cls.adjust_key1: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\":", "\"efficient\": {\"2009\": 34, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\":", "and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}, \"Max adoption", "\"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 15}}, \"competed\":", "\"2009\": { \"rate 1\": 50, \"rate 2\": 60, \"rate 3\": 70, \"rate 4\":", "numpy.array([22.22, 22.68, 20.11])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\":", "values at terminal leaf nodes. 
ok_master_mseg_dist1 (dict): Sample measure master microsegment including energy,", "engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # For first test case, verify correct adoption/competition scenario #", "overlap adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase,", "numpy.array([ 19.53341, 20.47302, 15.21750])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([", "\"2010\": 24}, \"efficient\": {\"2009\": 0, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\":", "microsegment including all point values at terminal leaf nodes. ok_master_mseg_dist1 (dict): Sample measure", "1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 5, 4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07,", "zone/building type/end use partition to a total energy or carbon market/savings value. Attributes:", "(list): List of competing measures including some measures with array inputs. 
measures_secondary_dist (list):", "10.343948, 7.801544])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]),", "\"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": { yr: 10 for yr in", "competing commercial measures; and that 'secondary_adj' correctly adjusts any secondary markets associated with", "numpy.array([ 4.882353, 7.108108, 6.327488, 10.343948, 8.181351])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.51,", "{ \"total\": { \"baseline\": {\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {\"2009\": 1.29884336, \"2010\": 1.29884336}},", "numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150},", "of sample residential supply-side cooling measure 1 including lists of stock cost input", "= run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.handyvars.retro_rate = 0 cls.test_adopt_scheme = \"Max", "\"rate 6\": numpy.pmt(0.065, 2, 0.9103132), \"rate 7\": -0.5}, \"2010\": { \"rate 1\": numpy.pmt(10.0,", "microsegment keys that overlap with 'measures_supply_dist' Measure objects. a_run_dist (object): Engine object incorporating", "150}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7,", "\"master_mseg\"] = self.ok_master_mseg_dist4 # Create Engine instance using test measure, run function on", "2.886001]), \"2010\": numpy.array([ 2.425032, 2.584709, 2.240438, 2.298386, 2.147181])}, \"irr (w/ energy and carbon", "\"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"cost\": { \"stock\":", "105, \"rate 6\": 110, \"rate 7\": 115}, \"2010\": { \"rate 1\": 85, \"rate", "output from zip_longest() fill_val = ('substituted entry', 5.2) # In this structure, k", "(dict): Sample residential measure #1. sample_measure2 (dict): Sample residential measure #2. 
sample_measure3 (dict):", "numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014),", "10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": {", "numpy.array([18, 15, 9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}}, \"lifetime\": {\"baseline\":", "{ \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 20, \"2010\": 20}}, \"competed\": {", "17.77300, 10.22977, 19.98073]), \"2010\": numpy.array([ 17.77300, 10.22977, 19.98073])}, \"efficient\": { \"2009\": numpy.array([ 8.886499,", "-0.01934271, -0.01897398, -0.04613129]), \"2010\": numpy.array([ 0.027285, 0.019795, -0.02023954, -0.02715319, -0.05525120])}, \"cce (w/ carbon", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {", "captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} class CommonMethods(object):", "energy (total captured)\": {}, \"original energy (competed and captured)\": {}, \"adjusted energy (total", "(w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.04898876, -0.05783823, -0.05267604, -0.05230731, -0.04751385]), \"2010\":", "\"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975, \"2010\": 20.82975}}, \"competed\":", "{ \"2009\": numpy.array([16.04, 17.30, 10.29]), \"2010\": numpy.array([16.04, 17.30, 10.29])}}, \"competed\": { \"all\": {\"2009\":", "self.ok_out[idx], places=2) class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,' and 'htcl_adj'. 
Verify that 'compete_res_primary' correctly", "}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"carbon cost\": {", "conserved energy/carbon outputs for ind, x in enumerate(self.ok_out_array): if x is not None:", "10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\":", "2.227001, 10.25874, 0.02119408]), \"2010\": numpy.array([ 2.227001, 10.25874, 0.02119408])}}, \"competed\": { \"baseline\": { \"2009\":", "numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/", "\"competed\": { \"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}}, \"lifetime\":", "18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 1.29884336, 0.01356626,", "numpy.array( [5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\":", "1.941176, 4.555556, 5.647891, 5.501689, 4.543007]), \"2010\": numpy.array([ 4.882353, 7.108108, 6.327488, 10.343948, 8.181351])}, \"payback", "10.29000]), \"2010\": numpy.array([ 16.04455, 17.29736, 10.29000])}, \"efficient\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]),", "carbon costs)\": { \"2009\": numpy.array([ 0.1937984, 0.1879699, 0.1748252, 0.2840909, 0.1724138]), \"2010\": numpy.array([ 0.2008032,", "numpy.array([ numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018),", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'single family", "\"total\": { \"baseline\": {\"2009\": 69, \"2010\": 66}, \"efficient\": {\"2009\": 46, \"2010\": 44}}, \"competed\":", "self.a_run.measures[ind].markets[self.test_adopt_scheme][ 
\"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test outcomes given valid sample measures w/ some array", "all 'measures_primary' objects. measures_all_dist (list): List of competing measures including some measures with", "-0.021500000, -0.021500000, -0.08611353, -0.08611353, -0.1247637])}, \"ccc\": { \"2009\": numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08,", "self.ok_out_point_res[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify test measure", "(annual)\": {\"2009\": 10, \"2010\": 15}}, \"carbon\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200},", "(competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(cls.handyvars, **x) for x in", "\"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}, \"efficient\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\":", "\"efficient\": {\"2009\": 11.5, \"2010\": 11}}, \"competed\": { \"baseline\": {\"2009\": 11.5, \"2010\": 11}, \"efficient\":", "22.22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.11, \"2010\": 11.11}}},", "None, 0.62, 1.59, 2, 0.67, 0.005, -0.13, 7.7e-10, -9.2e-9] def test_metric_updates(self): \"\"\"Test for", "across the supply and demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_supply,", "(w/ energy and carbon costs)\": {\"2009\": numpy.array([ 4.442382, 8.824726, 5.647891, 5.501689, 4.082098]), \"2010\":", "12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 20,", "def test_metrics_ok_distrib1(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize test", "heating and cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine", "15.21750]), \"2010\": numpy.array([ 
19.53341, 20.47302, 15.21750])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.02227,", "portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_point' with a residential sample", "\"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"cost\":", "\"key 1\": { \"nested key 1\": [1, 2, 3, 4, 5], \"nested key", "{ \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"carbon\": {", "\"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": {", "7]), \"2010\": numpy.array([5, 6, 7])}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\":", "market overlaps across the supply and demand sides of # heating and cooling", "\"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "194, 149]), \"2010\": numpy.array([194, 205, 219, 289, 176])}, \"savings (annual)\": { \"2009\": numpy.array([94,", "5, 2.265408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": {", "{ \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": {", "the case where the dicts are not of identical size, # zip_longest() will", "consumer_metrics_final_dist = [{ \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\":", "-115, \"rate 7\": -120}}}}] # Adjust/finalize point value test measure consumer metrics for", "\"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\":", "26.04455}}, \"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}},", ".25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": .30, \"2010\": .30}, \"Cooling\": {\"2009\":", "2\": 90, \"rate 3\": 
95, \"rate 4\": 100, \"rate 5\": 105, \"rate 6\":", "adoption scheme. overlap_key (string): First sample string for competed primary market microsegment key", "of 'measures_all'. measures_supply (list): Supply-side subset of 'measures_all'. measures_overlap1 (dict): List of supply-side", "{ \"2009\": { \"rate 1\": -190, \"rate 2\": -195, \"rate 3\": -190, \"rate", "import unittest import numpy import copy import itertools import os class CommonTestMeasures(object): \"\"\"Class", "4.335205, 4.218185, 3.631559]), \"2010\": numpy.array([ 1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])}, \"irr (w/ energy", "test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist4'), the focus", "numpy.array([0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\":", "tests below.\"\"\" def dict_check(self, dict1, dict2): \"\"\"Check the equality of two dicts. Args:", "0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346)]) }, \"commercial\": {", "-0.13025120])}, \"ccc\": { \"2009\": numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08,", "(grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed", "{\"2009\": 6.511136, \"2010\": 6.511136}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 39.06682, \"2010\": 39.06682},", "a total energy or carbon market/savings value. 
Attributes: a_run (object): Sample analysis engine", "choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10,", "in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\":", "\"Residential\": { \"Heating\": {\"2009\": .10, \"2010\": .10}, \"Cooling\": {\"2009\": .15, \"2010\": .15}}, \"Commercial\":", "valid sample measures w/ point value inputs.\"\"\" # Run the measure competition routine", "\"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}}, \"energy\": { \"total\":", "-100, -10])}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\":", "value inputs.\"\"\" # Run the measure competition routine on sample demand-side measures self.a_run.compete_res_primary(", "sides of # heating and cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) # Run the", "21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": { \"2009\": 15,", "\"carbon cost\": { \"residential\": { \"2009\": -150, \"2010\": -50}, \"commercial\": { \"2009\": None,", "cls.a_run = run.Engine(handyvars, measure_list) cls.ok_total = {\"2009\": 100, \"2010\": 100} cls.ok_partitions = {", "\"2010\": 0.865895571}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {", "'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2 = { \"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family", "\"rate 1\": -435, \"rate 2\": -440, \"rate 3\": -145, \"rate 4\": -150, \"rate", "{ \"2009\": 0.865895571, \"2010\": 0.865895571}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\":", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([50.6,", 
"\"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"baseline\":", "x, places=2) else: self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase): \"\"\"Test the operation of the 'payback'", "[\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"demand\",", "needed to finalize array test measure consumer # metrics consumer_metrics_final_dist = [{ \"stock", "\"measure\": { \"2009\": numpy.array([8.89, 5.11, 9.99]), \"2010\": numpy.array([8.89, 5.11, 9.99])}}}, \"energy\": { \"total\":", "'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run = run.Engine(cls.handyvars, cls.measures_all)", "instead of point values. measures_all (list): List of all competing measures with point", "copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]] # Instantiate engine object based on above", "numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}}, \"competed\": { \"baseline\": {", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([8.0, 7.5,", "isinstance(i, numpy.ndarray): self.assertTrue(type(i) == type(i2)) for x in range(0, len(i)): self.assertAlmostEqual(i[x], i2[x], places=2)", "cls.handyvars.retro_rate = 0 cls.test_adopt_scheme = \"Max adoption potential\" cls.adjust_key1 = str( ('primary', 'AIA_CZ1',", "20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60,", "numpy.array([0, 1.5, 2.6])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24,", "service (LED)\"]}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg", "inputs. 
measures_demand_dist (list): Demand-side subset of 'measures_all_dist'. measures_supply_dist (list): Supply-side subset of 'measures_all_dist'.", "Remove any market overlaps across the supply and demand sides of # heating", "None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\": {", "run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict = measure_instance.__dict__ def test_attributes(self):", "{\"2009\": 15, \"2010\": 15}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 30,", "2, 0.6645794), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 2, 0.3845794)]), \"2010\": numpy.array([", "\"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07,", "10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\":", "135}])}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "input generates expected payback output. 
Attributes: handyvars (object): Useful variables across the class.", "{\"2009\": 8.022273, \"2010\": 8.022273}}, \"competed\": { \"baseline\": {\"2009\": 8.022273, \"2010\": 8.022273}, \"efficient\": {\"2009\":", "\"cost savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": {\"2009\": 150,", "test_meas = run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using", "\"measure\": 1}}] def test_compete_com(self): \"\"\"Test outcomes given sample measures w/ point value inputs.\"\"\"", "run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using test measure,", "\"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\",", "0.22, 0.22, 0.22])}}] cls.ok_out_dist4 = [{ \"savings and portfolio metrics\": { \"Technical potential\":", "\"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 3.370236, 6.877566, 4.335205, 4.218185,", "-0.02466428, -0.02853592, -0.02023954, -0.02715319, -0.02355809])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([", "self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across the supply and demand sides", "market microsegments that should be generated for each Measure object in 'measures_all_dist' following", "numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07, 2, 2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07, 2,", "5, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": {", "across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years =", "\"total\": { 
\"baseline\": { \"2009\": 51, \"2010\": numpy.array([36, 39, 48])}, \"efficient\": { \"2009\":", "\"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": [\"general service (LED)\"]}, \"markets\": { \"Technical potential\": {", "0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34,", "1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95,", "self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'convert_to_numpy' function. Verify", "self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3]) def", "{\"2009\": 0, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40},", "in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): # Confirm that at the current location in the", "\"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None},", "measure energy cost savings. ok_csave (int): Sample measure avoided carbon emissions. ok_ccostsave (int):", "30.34466, 44.97110])}, \"efficient\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977,", "that overlap with 'measures_demand' Measure objects. measures_overlap2 (dict): List of demand-side Measure objects", "cost input values instead of point values. 
measures_all (list): List of all competing", "0.002333333, 0.002333333, -0.04935749, -0.04935749, -0.0802776]), \"2010\": numpy.array([ -0.021500000, -0.021500000, -0.08611353, -0.08611353, -0.1247637])}, \"ccc\":", "41.65950, 30.34466, 44.97110])}, \"efficient\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300,", "\"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\":", "identified, where in the case of a dict, the first item # in", "-400}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\":", "4\": 120, \"rate 5\": 125, \"rate 6\": 10, \"rate 7\": 135}])}}, \"energy cost\":", "test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist1'), the focus", "\"total\": { \"baseline\": { \"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": { \"2009\": 1.73179114, \"2010\":", "1]), \"2010\": numpy.array([36, 45, 61, 5, 54])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "2, -0.2169622), numpy.pmt(0.07, 2, 2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07, 2, 1.925539),", "\"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array( [25.1, 24.7,", "1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, cls.overlap_key_scnd: { \"stock\": { \"total\":", "0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\":", "measure 1. compete_meas3_dist (dict): Alternative version of sample residential supply-side cooling measure 1", "Sample commercial supply-side lighting measure 3. 
compete_meas_dist (dict): Alternative version of sample commercial", "16, 1]), \"2010\": numpy.array([36, 45, 61, 5, 54])}}}, \"carbon\": { \"total\": { \"baseline\":", "the 'metric_update' # function function_output = engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave,", "the operation of the 'calc_savings_metrics' function. Verify that measure master microsegment inputs yield", "be generated given 'ok_master_mseg_point' with a residential sample measure. ok_out_dist1 (dict): Measure attribute", "}}}, \"mseg_out_break\": {}}}} self.sample_measure3 = { \"name\": \"sample measure 3 (commercial)\", \"active\": 1,", "sample measures self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample", "2\": -95, \"rate 3\": -100, \"rate 4\": -105, \"rate 5\": -110, \"rate 6\":", "20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array([15, 16, 17])}},", "10.5])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22,", "class ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary' and 'secondary_adj' functions. Verify that 'compete_com_primary' correctly calculates", "{ \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": 5}},", "{\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4 =", "{ \"residential\": { \"2009\": None, \"2010\": None }, \"commercial\": { \"2009\": None, \"2010\":", "\"total\": { \"baseline\": {\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": {\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\":", "Sample baseline->measure stock cost delta. ok_esave (int): Sample measure energy savings. 
ok_ecostsave (int):", "-0.05267604, -0.05230731, -0.04751385]), \"2010\": numpy.array([ -0.09966428, -0.10353592, -0.09523954, -0.10215319, -0.09855809])}, \"ccc\": { \"2009\":", "measure avoided carbon emissions. ok_ccostsave (int): Sample measure avoided carbon costs. ok_out_dicts (list):", "(k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): # Confirm that at the", "0), \"rate 3\": numpy.pmt(0.45, 2, 0.1896552), \"rate 4\": numpy.pmt(0.25, 2, 0.3), \"rate 5\":", "1.29884336}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}},", "\"rate 6\": -150, \"rate 7\": -400}}}, \"carbon cost\": { \"residential\": { \"2009\": None,", "10, \"rate 7\": 135}])}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "{ \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([16, 27,", "numpy.pmt(0.07, 2, 1.356014)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.346974), numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07, 2,", "{\"2009\": 8.5, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 51, \"2010\": 36},", "\"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None},", "1.582016)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}}, \"irr (w/", "energy cost benefits)\": { \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\":", "\"rate 1\": 100, \"rate 2\": 110, \"rate 3\": 120, \"rate 4\": 130, \"rate", "\"energy cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-150, -200, -100])},", "10, \"2010\": 10}, 
\"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}},", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"all\":", "(competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} class CommonMethods(object): \"\"\"Define common methods for", "\"2009\": None, \"2010\": numpy.array([ { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\":", "-0.08611353, -0.08611353, -0.1247637])}, \"ccc\": { \"2009\": numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\":", "{ \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -190, \"rate", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 30,", "object. ok_total (dict): Sample unpartitioned measure results data. ok_partitions (dict): Sample results partitioning", "10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([2.23, 9.77, 0.02]), \"2010\": numpy.array([2.23, 9.77, 0.02])}},", "\"baseline\": {\"2009\": 11.5, \"2010\": 11}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "\"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "self.ok_out_dist2[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify", "suite test_meas = run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance", "home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity", "{ \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\": 34.5, \"2010\": 33}}, \"competed\": {", "\"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.346974), 
numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07, 2,", "= { \"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling',", "6.877566, 4.335205, 4.218185, 3.081800]), \"2010\": numpy.array([ 5.345834, 7.580577, 3.931585, 6.612039, 4.915578])}, \"irr (w/", "numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}}, \"carbon\": {", "\"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\": {", "Measure objects and associated contributing microsegment keys that overlap with 'measures_demand_dist' Measure objects.", "test_ok(self): \"\"\"Test for correct function output given valid inputs.\"\"\" dict1 = self.a_run.out_break_walk( self.ok_partitions,", "\"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([1.11, 4.89, 0.01]),", "{ cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist = { \"name\":", "# is empty, is missing section(s), or has different key names self.assertEqual(k, k2)", "1}, \"measure\": 2}} cls.ok_master_mseg_dist2 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 1.808018), \"2010\":", "0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"cost\": { \"stock\": { \"total\":", "not None: self.assertAlmostEqual(function_output[ind], x, places=2) else: self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase): \"\"\"Test the operation", "financial metrics that should be generated given 'ok_master_mseg_dist1' with a residential sample measure.", "\"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 40, \"2010\": 40}}, \"competed\":", "105, 89, 145, 96])}, \"cost savings 
(total)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8,", "\"2010\": 35}}, \"Commercial\": { \"Heating\": {\"2009\": 40, \"2010\": 40}, \"Cooling\": {\"2009\": 45, \"2010\":", "supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across the", "0.01445051])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}},", "{ \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": {", "\"measure\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\":", "11.11, \"2010\": 11.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\":", "measure, run function on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") #", "this point in all # test files) def main(): \"\"\"Trigger default behavior of", "test measure # consumer metrics consumer_metrics_final = [{ \"stock cost\": { \"residential\": {", "15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20,", "4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068,", "\"\"\"Test the operation of the 'convert_to_numpy' function. 
Verify that the function converts terminal/leaf", "numpy.array([ 0.1937984, 0.1879699, 0.1748252, 0.2840909, 0.1724138]), \"2010\": numpy.array([ 0.2008032, 0.1901141, 0.2145923, 0.2100840, 0.2222222])}}]", "\"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 3.370236,", "\"2010\": 15}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\":", "test_compete_com(self): \"\"\"Test outcomes given sample measures w/ point value inputs.\"\"\" # Run measure", "150}, \"efficient\": { \"2009\": numpy.array([6, 7, 1, 16, 1]), \"2010\": numpy.array([36, 45, 61,", "of the 'convert_to_numpy' function. Verify that the function converts terminal/leaf node lists in", "None}, \"commercial\": { \"2009\": { \"rate 1\": 50, \"rate 2\": 60, \"rate 3\":", "\"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 0.9607843, 2.703704, 4.335205, 4.218185,", "{\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 5}}}, \"cost\": { \"stock\": {", "\"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -50, \"2010\": -50},", "0, \"2010\": numpy.array([6, 5, 3])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "{ \"2009\": numpy.array([0, 1, 2]), \"2010\": numpy.array([0, 1, 2])}}}, \"energy\": { \"total\": {", "1.5, 2.6])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24, 20,", "8.022273}}, \"competed\": { \"baseline\": {\"2009\": 8.022273, \"2010\": 8.022273}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 3.370236, 6.877566, 4.335205, 4.218185, 3.081800]),", "\"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "\"residential\": { \"2009\": numpy.array([-150, -200, -100]), 
\"2010\": numpy.array([-150, -200, -100])}, \"commercial\": { \"2009\":", "{ \"baseline\": {\"2009\": 11.11183, \"2010\": 11.11183}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "30.34466, 44.97110]), \"2010\": numpy.array([ 41.65950, 30.34466, 44.97110])}, \"efficient\": { \"2009\": numpy.array([ 27.77300, 20.22977,", "None}, \"technology\": [\"windows\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\":", "'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to finalize array test", "measure 1\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\":", "150.0, 148.9]), \"2010\": numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])}, \"savings (annual)\": { \"2009\": numpy.array([49.4,", "ok_out_dist3 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that should", "service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\":", "\"efficient\": {\"2009\": 25.5, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\":", "\"2010\": numpy.array([66, 66, 63])}, \"efficient\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}}, \"competed\":", "carbon costs)\": {\"2009\": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]), \"2010\": numpy.array([2.00, 2.00, 4.09, 4.09,", "numpy.array([ 0.03566667, 0.03566667, -0.01602415, -0.01602415, -0.04694426]), \"2010\": numpy.array([ 0.05350000, 0.05350000, -0.01111353, -0.01111353, -0.04976366])},", "formatted as a point value else: self.assertAlmostEqual(i, i2, places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that", "0, 1, 2], [10, 4, 7, 
8, 10], [-100, 0, 1]] cls.ok_out =", "-155, \"rate 6\": -160, \"rate 7\": -370}}}, \"carbon cost\": { \"residential\": { \"2009\":", "\"cost savings (annual)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3,", "\"2010\": numpy.array([11.0, 11.0, 10.5])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\":", "numpy.array([36, 39, 48])}, \"efficient\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}}, \"competed\": {", "None}}, \"energy cost\": { \"residential\": { \"2009\": -400, \"2010\": -400}, \"commercial\": { \"2009\":", "\"2009\": numpy.array([ 0.003046667, -0.01407333, -0.05267604, -0.05230731, -0.07946463]), \"2010\": numpy.array([ -0.047715000, -0.05520500, -0.09523954, -0.10215319,", "None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.04958678), \"rate 2\": numpy.pmt(1.0,", "cost\": { \"residential\": { \"2009\": -50, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\":", "CZ2\": { \"Residential\": { \"Heating\": {\"2009\": .30, \"2010\": .30}, \"Cooling\": {\"2009\": .35, \"2010\":", "numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "\"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.09917355), \"rate 2\": numpy.pmt(1.0, 2, 0.75), \"rate", "cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist = { \"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single", "'measures_all' objects. 
measures_all_dist (list): List including competing/interacting sample Measure objects with array inputs.", "{ \"name\": \"sample compete measure c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\":", "-95, \"rate 3\": -100, \"rate 4\": -105, \"rate 5\": -110, \"rate 6\": -115,", "15, 9])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([12, 10, 6])}, \"efficient\":", "\"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"],", "4.882353, 7.108108, 6.327488, 10.343948, 8.181351])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.51, 0.2700000,", "8.0])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 51, \"2010\": numpy.array([36, 39, 48])},", "\"2010\": 6}}, \"competed\": { \"baseline\": {\"2009\": 8.5, \"2010\": 6}, \"efficient\": {\"2009\": 0, \"2010\":", "4.80]), \"2010\": numpy.array([0.87, 0.01, 4.80])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "\"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "None}}, \"carbon cost\": { \"residential\": { \"2009\": -50, \"2010\": -50}, \"commercial\": { \"2009\":", "numpy.array([ 0.002333333, 0.002333333, -0.04935749, -0.04935749, -0.0802776]), \"2010\": numpy.array([ -0.021500000, -0.021500000, -0.08611353, -0.08611353, -0.1247637])},", "{ \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2, 0.5625), \"rate 3\":", "-0.255), numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 5, 2.265408)])},", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40},", "0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])},", "cls.test_adopt_scheme 
= 'Max adoption potential' cls.ok_rate = 0.07 cls.ok_master_mseg_point = { \"stock\": {", "cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.4345794), \"2010\": numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\":", "-0.11474490, -0.09371098, -0.072742925, -0.11206083])}, \"ccc\": { \"2009\": numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]),", "23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"carbon\": { \"total\": { \"baseline\":", "numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "\"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.036380,", "45.0, 43.9])}, \"cost savings (total)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\":", "'supply', 'ASHP', 'existing')) cls.test_htcl_adj = { \"supply\": { \"['AIA_CZ1', 'single family home', 'existing']\":", "40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}},", "1.73179114}}, \"competed\": { \"baseline\": { \"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": { \"2009\": 0.432947785,", "\"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}},", "\"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\":", "\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array( [5, 6, 7]), \"2010\": numpy.array(", "level of the recursive # exploration of dict1 and dict2, respectively for (k,", "class. sample_measure_res (object): Sample residential measure data. 
sample_measure_com (object): Sample commercial measure data.", "22.22366, \"2010\": 22.22366}, \"efficient\": {\"2009\": 11.11183, \"2010\": 11.11183}}, \"competed\": { \"baseline\": {\"2009\": 11.11183,", "keys for measure markets/savings/portfolio metrics for adopt_scheme in self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())),", "measure_list) cls.ok_total = {\"2009\": 100, \"2010\": 100} cls.ok_partitions = { \"AIA CZ1\": {", "\"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07, 2, 1.582016)]) }, \"commercial\": { \"2009\":", "\"measure\": { \"2009\": numpy.array([1.11, 4.89, 0.01]), \"2010\": numpy.array([1.11, 4.89, 0.01])}}}, \"energy\": { \"total\":", "are the keys that correspond to # the dicts or unitary values that", "-0.01111353, -0.01111353, -0.04976366])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.002333333, 0.002333333,", "2.240438, 2.298386, 2.147181])}, \"irr (w/ energy and carbon costs)\": { \"2009\": numpy.array([ 4.713113,", "microsegment key chain being tested. 
secnd_adj_key (string): Key used to link primary and", "3\": -145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\":", "numpy.array([ 13.88650, 10.11489, 14.99037])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([", "\"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -150, \"2010\": -50},", "100, \"2010\": 100} cls.ok_partitions = { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\":", "2.227001}}, \"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}},", "numpy.array([ 1.29884336, 0.01356626, 7.20249116])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]),", "heating and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments", "None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\":", "-350, \"rate 2\": -60, \"rate 3\": -70, \"rate 4\": -380, \"rate 5\": -390,", "{}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\":", "portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True, \"competed\": True}, \"Max adoption potential\":", "\"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]),", "\"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{", "0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test output given residential measure with array inputs.\"\"\" #", "test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist3'), the focus", "\"mseg_out_break\": {}}}} self.sample_measure5 = { \"name\": \"sample measure 5 
(commercial)\", \"active\": 1, \"market_entry_year\":", "{ \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\":", "the equality of two dicts. Args: dict1 (dict): First dictionary to be compared", "consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self): \"\"\"Test output given residential measure with", "this # value is given as a tuple to be of comparable structure", "section(s), or has different key names self.assertEqual(k, k2) # If the recursion has", "10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_com(self): \"\"\"Test outcomes", "engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0])", "\"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\": { cls.secnd_adj_key:", "{ \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 30,", "\"2010\": 0}}, \"adjusted energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted", "30}, \"measure\": {\"2009\": 23, \"2010\": 22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15},", "\"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key1: {", "5.072499])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 16.04455, 17.29736,", "{ \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2,", "\"sample compete measure c3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": 
[\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\":", "of two dicts. Args: dict1 (dict): First dictionary to be compared dict2 (dict):", "\"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": {", "family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist = { \"measures\": cls.measures_all_dist[0:2],", "\"2010\": numpy.array([8.0, 7.5, 6.5])}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 1.5, 2.6])}}}, \"energy\":", "cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist = { \"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family", "\"efficient\": {\"2009\": 0, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\":", "2, 1.808018), numpy.pmt(0.07, 2, 1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014),", "routine on sample measures self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments", "3.370236, 6.877566, 4.335205, 4.218185, 3.081800]), \"2010\": numpy.array([ 5.345834, 7.580577, 3.931585, 6.612039, 4.915578])}, \"irr", "\"2010\": numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def", "values instead of point values. compete_meas4 (dict): Sample residential supply-side cooling measure 2.", "\"2010\": 20.82975}}, \"competed\": { \"baseline\": {\"2009\": 13.88650, \"2010\": 13.88650}, \"efficient\": {\"2009\": 6.943250, \"2010\":", "13.02227}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 39.06682,", "sample measure. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\":", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341,", "cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"original energy (competed and captured)\": { cls.secnd_adj_key: {\"2009\":", "None}, \"commercial\": { \"2009\": { \"rate 1\": 100, \"rate 2\": 110, \"rate 3\":", "\"efficient\": { \"2009\": 5, \"2010\": numpy.array([ 0, 1, 2])}}}, \"energy\": { \"total\": {", "self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test outcomes given valid sample measures w/ some array", "(annual)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8,", "{}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key1: { \"2009\": 0, \"2010\": 0}}, \"total\":", "\"2010\": 10}, \"measure\": { \"2009\": numpy.array([1.73, 0.02, 9.60]), \"2010\": numpy.array([1.73, 0.02, 9.60])}}, \"competed\":", "6.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])},", "ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\":", "and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist2'), the focus of this", "numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) #", "(object): Sample commercial measure data. 
test_adopt_scheme (string): Sample consumer adoption scheme. ok_rate (float):", "energy and carbon costs)\": { \"2009\": numpy.array([ 4.713113, 4.884221, 5.309580, 2.908860, 5.394281]), \"2010\":", "0.1133333, 0.08222222, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3 = [{ \"savings and portfolio metrics\": {", "0].update_results, self.ok_out_dist1[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify test", "2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346),", "95, 81, 11, 124])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {", "run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict = measure_instance.__dict__ def", "2, 1.625709), \"rate 6\": numpy.pmt(0.065, 2, 1.820626), \"rate 7\": -1}, \"2010\": { \"rate", "residential measure #2. sample_measure3 (dict): Sample commercial measure #1. \"\"\" def __init__(self): self.sample_measure", "{ \"2009\": numpy.array([6, 7, 1, 16, 1]), \"2010\": numpy.array([36, 45, 61, 5, 54])}}},", "{ \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 60, \"2010\": 40}},", "{\"2009\": 5, \"2010\": 15}}}, { \"cce\": {\"2009\": -0.01602415, \"2010\": -0.01111353}, \"cce (w/ carbon", "\"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) #", "compete_meas3 (dict): Sample commercial supply-side lighting measure 3. 
compete_meas_dist (dict): Alternative version of", "numpy.array([ 4.442382, 8.824726, 5.647891, 5.501689, 4.082098]), \"2010\": numpy.array([ 8.446248, 11.795815, 6.327488, 10.343948, 7.801544])},", "Attributes: handyvars (object): Useful variables across the class. measure_list (list): List for Engine", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure5", "0.865895571}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": {", "{ \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 23, \"2010\": 22}},", "1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "one sample residential measure. ok_num_units (int): Sample number of competed units. ok_base_life (int):", "\"efficient\": {\"2009\": 31.66775, \"2010\": 31.66775}}, \"competed\": { \"baseline\": {\"2009\": 21.11183, \"2010\": 21.11183}, \"efficient\":", "r3 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None},", "\"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys", "{ \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([18,", "0.5625), \"rate 3\": numpy.pmt(0.45, 2, 0.8739596), \"rate 4\": numpy.pmt(0.25, 2, 1.08), \"rate 5\":", "\"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([1.73, 0.02, 9.60]),", "0.01808835, 9.60332155])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([", "numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10,", "4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, 
\"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]),", "numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.59768671,", "2.425032, 2.584709, 2.240438, 2.298386, 2.147181])}, \"irr (w/ energy and carbon costs)\": { \"2009\":", "2\": 5}, \"key 2\": 10.8}, \"Max adoption potential\": { \"key 1\": { \"nested", "5.11, 9.99])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]),", "{ \"cce\": { \"2009\": numpy.array([ 0.03566667, 0.03566667, -0.01602415, -0.01602415, -0.04694426]), \"2010\": numpy.array([ 0.05350000,", "'compete_com_primary' correctly calculates primary market shares and updates master microsegments for a series", "numpy.pmt(0.07, 2, 1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2,", "run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = { \"market_entry_year\": None, \"market_exit_year\": None, \"markets\": { \"Technical potential\":", "{ \"2009\": numpy.array([ 0.865895571, 0.01085301, 6.722325]), \"2010\": numpy.array([ 0.865895571, 0.01085301, 6.722325])}}, \"competed\": {", "15, \"2010\": 15}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": {", "6\": -160, \"rate 7\": -170}}}}, { \"stock cost\": { \"residential\": { \"2009\": None,", "\"2010\": numpy.array([ 3.340502, 14.65534, 0.02890102])}, \"efficient\": { \"2009\": numpy.array([ 2.227001, 10.25874, 0.02119408]), \"2010\":", "numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 5, 2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1,", "\"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 16.04, \"2010\":", "{ \"2009\": numpy.array([8.02, 8.65, 5.14]), \"2010\": numpy.array([8.02, 8.65, 5.14])}}}, \"energy\": { \"total\": {", "lifetime ratio. 
ok_base_scost (int): Sample baseline stock cost. ok_scostsave (int): Sample baseline->measure stock", "scaling\": 1}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95},", "{ \"2009\": 0, \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\":", "Sample commercial measure #1. \"\"\" def __init__(self): self.sample_measure = { \"name\": \"sample measure", "secondary microsegment adjustments on sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check", "\"baseline\": {\"2009\": 13.02227, \"2010\": 13.02227}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"carbon\": { \"total\":", "numpy.array([5, 6, 7])}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\":", "\"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"general service (CFL)\"], \"secondary\": None}, \"markets\": { \"Technical", "}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10,", "-0.01897398, -0.04613129]), \"2010\": numpy.array([ 0.027285, 0.019795, -0.02023954, -0.02715319, -0.05525120])}, \"cce (w/ carbon cost", "12.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9,", "21.11183, 21.34227, 20.05334]), \"2010\": numpy.array([ 21.11183, 21.34227, 20.05334])}, \"efficient\": { \"2009\": numpy.array([ 10.55592,", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.4345794),", "key 2\": 5}, \"key 2\": 10.8}, \"Max adoption potential\": { \"key 1\": {", "\"2010\": 40}, \"efficient\": { \"2009\": numpy.array( [25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array(", "\"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\": { \"total\": { \"baseline\":", "portfolio-level financial 
metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[", "10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 2, 4])}}}, \"energy\": { \"total\": {", "has not yet reached the terminal/leaf node if isinstance(i, dict): # Test that", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5,", "Sample partitioned measure results data. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use", "0].update_results, self.ok_out_point_com[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) # Verify test", "42.3, 41.9, 50.0, 48.9]), \"2010\": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])}, \"cost savings (total)\":", "metrics that should be generated given 'ok_master_mseg_dist4' with a residential sample measure. 
\"\"\"", "\"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\": 50, \"2010\": 100}}, \"competed\":", "{ \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])},", "numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40},", "{ \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}, \"efficient\": {", "4.885113, 0.009633673])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([", "{ \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-50, -100, -10])}, \"commercial\": {", "4], [-10, 0, 1, 2], [10, 4, 7, 8, 10], [-100, 0, 1]]", "1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4 = { \"stock\":", "2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 5, 2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346),", "numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])}, \"savings (annual)\": { \"2009\": numpy.array([49.4, 42.3, 41.9, 50.0,", "1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon", "\"rate 6\": 100, \"rate 7\": 110}}}, \"energy cost\": { \"residential\": { \"2009\": None,", "{ \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.5, \"2010\": 11}}}, \"energy\": {", "11.11183, \"2010\": 11.11183}}, \"competed\": { \"baseline\": {\"2009\": 11.11183, \"2010\": 11.11183}, \"efficient\": {\"2009\": 0,", "-75}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", "0.07 cls.ok_master_mseg_point = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20},", "\"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { 
\"all\":", "0.4345794), \"2010\": numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"energy cost\": {", "self.measure_list) # Record the output for the test run of the 'metric_update' #", "numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07,", "0.33, 0.33])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([0.33, 0.33, 0.20, 0.20,", "numpy.array([ numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794),", "\"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}}, \"lifetime\":", "\"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "{ \"total\": { \"baseline\": {\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": {\"2009\": 1.73179114, \"2010\": 1.73179114}},", "\"efficient\": { \"2009\": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7,", "{ \"Residential\": { \"Heating\": {\"2009\": 10, \"2010\": 10}, \"Cooling\": {\"2009\": 15, \"2010\": 15}},", "7\": 135}])}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", "\"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key1: { \"b1\": {\"2009\": -0.95, \"2010\":", "10.02667])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 22.22366, 22.68455,", "6\": 100, \"rate 7\": 110}, \"2010\": { \"rate 1\": 50, \"rate 2\": 60,", "1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.overlap_key: {", "benefits)\": { \"2009\": numpy.array([ -3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08, -5.520500e-08,", "\"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, 
\"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\":", "\"efficient\": { \"2009\": 5, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1},", "7.20249116])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}},", "\"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\": numpy.array([16, 15, 13])}, \"efficient\":", "def test_metrics_ok_distrib4(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize test", "Sample residential measure #2. sample_measure3 (dict): Sample commercial measure #1. \"\"\" def __init__(self):", "{ \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}, \"efficient\": {", "all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate = 0", "\"2009\": { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95, \"rate 4\":", "{ \"residential\": { \"2009\": 120, \"2010\": 120}, \"commercial\": { \"2009\": None, \"2010\": None}},", "(dict): List of supply-side Measure objects and associated contributing microsegment keys that overlap", "'cooling', 'demand', 'lighting gain', 'existing')) cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1 = {", "{ \"total\": { \"baseline\": {\"2009\": 3.340502, \"2010\": 3.340502}, \"efficient\": {\"2009\": 2.227001, \"2010\": 2.227001}},", "10}, \"measure\": { \"2009\": numpy.array([1.73, 0.02, 9.60]), \"2010\": numpy.array([1.73, 0.02, 9.60])}}, \"competed\": {", "10, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 0,", "benefits)\": { \"2009\": numpy.array([ -8.232209e-08, -9.117156e-08, -8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08, -1.035359e-07,", "numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}}, { \"cce\": { 
\"2009\": numpy.array([ -0.01306317, -0.01389378, -0.01422262,", "{\"2009\": 2.227001, \"2010\": 2.227001}}, \"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\":", "[\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"windows\"], \"technology_type\":", "markets/savings/portfolio metrics for adopt_scheme in self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings", "-0.01602415, \"2010\": -0.01111353}, \"cce (w/ carbon cost benefits)\": { \"2009\": -0.04935749, \"2010\": -0.08611353},", "\"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"cost\":", "{\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\":", "-8.611353e-08, -1.247637e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07,", "-160, \"rate 7\": -170}, \"2010\": { \"rate 1\": -135, \"rate 2\": -140, \"rate", "measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist2'), the focus of", "cls.adjust_key2 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP',", "{ cls.adjust_key2: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify", "'secondary_adj' correctly adjusts any secondary markets associated with these primary market microsegments. Attributes:", "array. 
ok_master_mseg_dist3 (dict): Sample measure master microsegment including measure lifetime array. ok_master_mseg_dist4 (dict):", "6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([16.04, 17.30, 10.29]), \"2010\": numpy.array([16.04,", "that overlap with 'measures_supply_dist' Measure objects. a_run_dist (object): Engine object incorporating all 'measures_all_dist'", "metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{ \"stock\":", "measures cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize array test", "{ \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 15, \"2010\": 15}}, \"competed\": {", "valid sample inputs. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all", "be compared Raises: AssertionError: If dictionaries are not equal. 
\"\"\" # zip() and", "2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346)]) }, \"commercial\":", "{ \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array( [20, 21,", "\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\":", "{\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "{ \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"cost\": {", "{\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None},", "2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794)]), \"2010\": numpy.array([", "potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys and values\": {}, \"competed", "\"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 23, \"2010\":", "savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.036380, 0.019260,", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\":", "15.17233, 22.48555])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443,", "{ \"2009\": None, \"2010\": None }, \"commercial\": { \"2009\": None, \"2010\": numpy.array([ {", "7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\":", "30.43499]), \"2010\": numpy.array([ 39.06682, 40.94604, 30.43499])}, \"efficient\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]),", "{ \"total\": { \"baseline\": {\"2009\": 41.65950, \"2010\": 41.65950}, \"efficient\": {\"2009\": 27.77300, \"2010\": 
27.77300}},", "numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}}, \"irr (w/ energy costs)\": {", "\"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0.87, \"2010\": 0.87}}}, \"energy\":", "in the dict that has missing content; this # value is given as", "handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict =", "4.00]), \"2010\": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])}, \"irr (w/ energy and carbon costs)\":", "secnd_adj_key (string): Key used to link primary and secondary market microsegments (by climate,", "energy costs)\": {\"2009\": numpy.array([ 3.370236, 6.877566, 4.335205, 4.218185, 3.081800]), \"2010\": numpy.array([ 5.345834, 7.580577,", "6\": numpy.pmt(0.065, 2, 0.4389671), \"rate 7\": -0.25}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2,", "\"name\": \"sample compete measure c3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"],", "\"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\":", "measure #1. sample_measure2 (dict): Sample residential measure #2. 
sample_measure3 (dict): Sample commercial measure", "cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist =", "numpy.pmt(0.45, 2, 1.165279), \"rate 4\": numpy.pmt(0.25, 2, 1.44), \"rate 5\": numpy.pmt(0.15, 2, 1.625709),", "2.59768671, 0.02713253, 14.40498233]), \"2010\": numpy.array([ 2.59768671, 0.02713253, 14.40498233])}, \"efficient\": { \"2009\": numpy.array([ 1.73179114,", "with array inputs.\"\"\" # Initialize test measure and assign it a sample 'uncompeted'", "with array inputs. measures_demand_dist (list): Demand-side subset of 'measures_all_dist'. measures_supply_dist (list): Supply-side subset", "that overlap with 'measures_demand_dist' Measure objects. measures_overlap2_dist (dict): List of demand-side Measure objects", "numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}, \"efficient\": { \"2009\": numpy.array([0,", "generated for each Measure object in 'measures_all' following competition and supply-demand overlap adjustments.", "{ \"total\": { \"baseline\": {\"2009\": 90, \"2010\": 90}, \"efficient\": {\"2009\": 60, \"2010\": 60}},", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\":", "0].update_results, self.ok_out_dist4[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify test", "0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 0.865895571, 0.01085301, 6.722325]), \"2010\": numpy.array([ 0.865895571, 0.01085301,", "potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20},", "0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, 
\"measure\": 1}}, { \"stock\": { \"total\":", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\":", "supply-side Measure objects and associated contributing microsegment keys that overlap with 'measures_demand_dist' Measure", "0.2700000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])}, \"payback (w/", "0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1),", "7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "\"baseline\": { \"2009\": numpy.array([ 16.04455, 17.29736, 10.29000]), \"2010\": numpy.array([ 16.04455, 17.29736, 10.29000])}, \"efficient\":", "cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist = { \"measures\":", "\"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([0.87, 0.01, 4.80]),", "numpy.pmt(10.0, 2, 0.09917355), \"rate 2\": numpy.pmt(1.0, 2, 0.75), \"rate 3\": numpy.pmt(0.45, 2, 1.165279),", "numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 39.06682,", "\"measure\": {\"2009\": 16.04, \"2010\": 16.04}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "'htcl_adj'. 
Verify that 'compete_res_primary' correctly calculates primary market shares and updates master microsegments", "numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}}, \"competed\": { \"baseline\": {", "1\": 105, \"rate 2\": 110, \"rate 3\": 115, \"rate 4\": 120, \"rate 5\":", "{ \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}, \"efficient\": {", "numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 51, \"2010\": numpy.array([36,", "{\"2009\": 5, \"2010\": 5}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\":", "contributing microsegment keys that overlap with 'measures_demand' Measure objects. measures_overlap2 (dict): List of", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.5, \"2010\": 6}}}, \"energy\": { \"total\":", "{}}}} self.sample_measure5 = { \"name\": \"sample measure 5 (commercial)\", \"active\": 1, \"market_entry_year\": None,", "{} }}}, \"mseg_out_break\": {}}}} self.sample_measure2 = { \"name\": \"sample measure 2\", \"active\": 1,", "\"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": {", "0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])},", "\"commercial\": { \"2009\": { \"rate 1\": -90, \"rate 2\": -95, \"rate 3\": -100,", "-5.5])}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 100,", "{ \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete", "{\"2009\": 0, \"2010\": 36}, \"efficient\": {\"2009\": 0, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\":", "measure competition routine on sample supply-side measures self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2, self.test_adopt_scheme) # 
Remove", "\"total\": { \"baseline\": {\"2009\": 39.06682, \"2010\": 39.06682}, \"efficient\": {\"2009\": 26.04455, \"2010\": 26.04455}}, \"competed\":", "\"2010\": 5}, \"measure\": { \"2009\": numpy.array([0.87, 0.01, 4.80]), \"2010\": numpy.array([0.87, 0.01, 4.80])}}}, \"energy\":", "10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 10,", "\"commercial\": { \"2009\": { \"rate 1\": 50, \"rate 2\": 60, \"rate 3\": 70,", "20}, \"efficient\": { \"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10,", "tuples for the items # identified, where in the case of a dict,", "secondary microsegments to adjust. a_run (object): Analysis engine object incorporating all 'measures_primary' objects.", "0.09917355), \"rate 2\": numpy.pmt(1.0, 2, 0.75), \"rate 3\": numpy.pmt(0.45, 2, 1.165279), \"rate 4\":", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366,", "numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 2, 0.3845794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07, 2,", "{\"2009\": 20, \"2010\": 20}, \"Cooling\": {\"2009\": 25, \"2010\": 25}}}, \"AIA CZ2\": { \"Residential\":", "the 'calc_savings_metrics' function. Verify that measure master microsegment inputs yield expected savings and", "numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "2, 0.4345794), numpy.pmt(0.07, 5, 2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5),", "'measures_supply' Measure objects. a_run (object): Analysis engine object incorporating all 'measures_all' objects. 
measures_all_dist", "numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\":", "\"total\": { \"baseline\": {\"2009\": 63.33550, \"2010\": 63.33550}, \"efficient\": {\"2009\": 42.22366, \"2010\": 42.22366}}, \"competed\":", "30, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": {", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\":", "\"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None }, \"commercial\": { \"2009\":", "\"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2,", "1, 16, 1]), \"2010\": numpy.array([36, 45, 61, 5, 54])}}}, \"carbon\": { \"total\": {", "6.5, 8.0])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\":", "\"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\":", "functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years =", "20, \"2010\": 20}, \"Cooling\": {\"2009\": 25, \"2010\": 25}}}, \"AIA CZ2\": { \"Residential\": {", "\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\":", "{ \"baseline\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}, \"efficient\": { \"2009\": 0,", "energy and carbon costs)\": {\"2009\": numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]), \"2010\": numpy.array([0.33, 0.33,", "4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "\"total\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}, \"competed\":", "and associated contributing microsegment keys that overlap with 
'measures_demand' Measure objects. measure_master_msegs_out (dict):", "45}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}},", "'uncompeted' # market ('ok_master_mseg_dist4'), the focus of this test suite test_meas = run.Measure(self.handyvars,", "\"2009\": 0, \"2010\": numpy.array([24, 20, 12])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\":", "numpy.pmt(0.07, 5, 2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2,", "the keys are equal; this should fail if one of the dicts #", "and associated contributing microsegment keys that overlap with 'measures_demand' Measure objects. measures_overlap2 (dict):", "\"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\":", "{ \"baseline\": {\"2009\": 8.022273, \"2010\": 8.022273}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "\"rate 7\": -1}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0,", "\"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.89, 5.11, 9.99]), \"2010\": numpy.array([8.89, 5.11, 9.99])}}}, \"energy\":", "0.1, 0.1, 0.4]}}, cls.overlap_key_scnd: { \"rate distribution\": {}}}, \"secondary mseg adjustments\": { \"market", "\"2010\": numpy.array([17.77, 10.23, 19.98])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {", "\"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}}, \"competed\": { \"baseline\":", "numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6, 6.5, 8])}}, \"competed\": {", "variables across the class. 
measure_list (list): List for Engine including one sample residential", "\"2010\": 5}, \"measure\": {\"2009\": 1.11, \"2010\": 1.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "(object): Analysis engine object incorporating all 'measures_primary_dist' objects. measures_overlap (dict): List of supply-side", "the class. sample_measure_res (object): Sample residential measure data. sample_measure_com (object): Sample commercial measure", "6.5])}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 1.5, 2.6])}}}, \"energy\": { \"total\": {", "run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to finalize array test measure consumer #", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887,", "13.64868, 10.14500]), \"2010\": numpy.array([ 13.02227, 13.64868, 10.14500])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341,", "# Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist2'),", "def __init__(self): self.sample_measure = { \"name\": \"sample measure 1\", \"active\": 1, \"market_entry_year\": None,", "1.2, 2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4 = { \"stock\": { \"total\": { \"all\": {\"2009\":", "5.14]), \"2010\": numpy.array([8.02, 8.65, 5.14])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "{\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_com(self): \"\"\"Test outcomes given sample", "\"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}}, \"lifetime\":", "35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30,", "\"technology\": [\"windows\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\",", "\"demand\", 
\"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": { \"Technical", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.02, \"2010\": 8.02}}}, \"energy\": {", "\"Residential\": { \"Heating\": {\"2009\": 30, \"2010\": 30}, \"Cooling\": {\"2009\": 35, \"2010\": 35}}, \"Commercial\":", "22.22366}, \"efficient\": {\"2009\": 11.11183, \"2010\": 11.11183}}, \"competed\": { \"baseline\": {\"2009\": 11.11183, \"2010\": 11.11183},", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 51, \"2010\": 36}, \"efficient\": {\"2009\": 34, \"2010\":", "{ \"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0,", "\"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\":", "None, None, None, 0.62, 1.59, 2, 0.67, 0.005, -0.13, 7.7e-10, -9.2e-9] def test_metric_updates(self):", "\"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed", "# market ('ok_master_mseg_dist3'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res)", "{\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 1.808018),", "portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist1' with a residential sample", "[ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1,", "6\": 110, \"rate 7\": 115}, { \"rate 1\": 205, \"rate 2\": 100, \"rate", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736,", "numpy.pmt(0.25, 2, 1.08), \"rate 5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547),", "None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full service\", 
\"structure_type\": [\"new\", \"existing\"],", "numpy.array([ 27.77300, 20.22977, 29.98073])}, \"efficient\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([", "\"competed\": { \"baseline\": { \"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": { \"2009\": 0.432947785, \"2010\":", "\"total\": { \"baseline\": {\"2009\": 22.22366, \"2010\": 22.22366}, \"efficient\": {\"2009\": 11.11183, \"2010\": 11.11183}}, \"competed\":", "(LED)\"]}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys", "{ \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\": {\"2009\": 15, \"2010\": 25}}, \"competed\": {", "names self.assertEqual(k, k2) # If the recursion has not yet reached the terminal/leaf", "5}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg", "{\"2009\": 0, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 12}, \"efficient\": {\"2009\":", "inputs.\"\"\" # Run the measure competition routine on sample demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist,", "energy and carbon costs)\": {\"2009\": numpy.array([ 4.442382, 8.824726, 5.647891, 5.501689, 4.082098]), \"2010\": numpy.array([", "cooling measure 1 including lists of stock cost input values instead of point", "\"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": 10, \"2010\": 10}, \"Cooling\": {\"2009\": 15,", "5\": 125, \"rate 6\": 10, \"rate 7\": 135}])}}, \"energy cost\": { \"residential\": {", "instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Test that valid input", "\"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure4 = { \"name\":", "\"2010\"] cls.test_adopt_scheme = \"Max adoption potential\" cls.overlap_key = str( ('primary', 'AIA_CZ1', 'assembly', 'electricity", "\"2010\": numpy.repeat(None, 
5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.4672897),", "{ \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": 10, \"2010\": 10}, \"Cooling\": {\"2009\":", "\"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_out_point_res = [{ \"savings and", "[run.Measure(handyvars, **sample_measure)] cls.a_run = run.Engine(handyvars, measure_list) cls.ok_total = {\"2009\": 100, \"2010\": 100} cls.ok_partitions", "25}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}},", "and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist2' with a residential", "2, 1.36547), \"rate 7\": -0.75}}}, \"carbon cost\": { \"residential\": {\"2009\": None, \"2010\": None},", "\"sub-market scaling\": 1}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\": { \"2009\":", "self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist,", "\"2010\": numpy.repeat(None, 5) }}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2,", "None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.9040091), \"2010\":", "20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10},", "\"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07,", "measure energy savings. ok_ecostsave (int): Sample measure energy cost savings. 
ok_csave (int): Sample", "correct function output given valid inputs.\"\"\" dict1 = self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2 =", "= 3 cls.ok_product_lifetime = 6.2 cls.ok_life_ratio = 2 cls.ok_base_scost = 1 cls.ok_meas_sdelt =", "\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 2.59768671, \"2010\":", "1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785,", "and that 'secondary_adj' correctly adjusts any secondary markets associated with these primary market", "of # heating and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed", "# Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify test", "0.4, 0.5], \"nested key 2\": 2}, \"key 2\": 5.8}}} def test_numpy_convert(self): \"\"\"Test for", "object incorporating all 'measures_all' objects. 
measures_all_dist (list): List including competing/interacting sample Measure objects", "8], [-10, 14, 2, 3, 4], [-10, 0, 1, 2], [10, 4, 7,", "13.3, 13.8, 12.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]),", "structure, k and k2 are the keys that correspond to # the dicts", "that valid inputs yield correct anpv, irr, payback, and # cost of conserved", "test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) # Verify test measure savings", "value test measure consumer metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind]", "\"competed\": { \"baseline\": {\"2009\": 8.5, \"2010\": 6}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "comparable structure # to the normal output from zip_longest() fill_val = ('substituted entry',", "\"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_out_point_res = [{ \"savings", "-1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([", "\"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\":", "\"2010\": numpy.array([10, 12, 14])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {", "35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35},", "7\": -370}, \"2010\": { \"rate 1\": -435, \"rate 2\": -440, \"rate 3\": -145,", "-1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc (w/ energy", "\"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": 
[\"lighting\"], \"secondary\": None}, \"technology_type\":", "numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}}, \"competed\": { \"baseline\": {", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17.77300, \"2010\": 17.77300}, \"efficient\": {\"2009\":", "dict2, respectively for (k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): # Confirm", "\"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}]", "4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}}}, \"energy cost\":", "None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"general service (CFL)\"], \"secondary\": None},", "\"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array( [25.1, 24.7,", "10.29]), \"2010\": numpy.array([16.04, 17.30, 10.29])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "numpy.array([-150, -200, -100])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\":", "1}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\":", "\"Residential\": { \"Heating\": {\"2009\": 10, \"2010\": 10}, \"Cooling\": {\"2009\": 15, \"2010\": 15}}, \"Commercial\":", "(grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed", "measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self): \"\"\"Test output given residential measure", "functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure measure_instance =", "(dict): 
Master market microsegments that should be generated for each Measure object in", "= [run.Measure(handyvars, **sample_measure)] cls.a_run = run.Engine(handyvars, measure_list) cls.ok_total = {\"2009\": 100, \"2010\": 100}", "\"2010\": 36}, \"efficient\": {\"2009\": 0, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\":", "finalize array test measure consumer # metrics consumer_metrics_dist = [{ \"stock cost\": {", "\"affected savings\": { yr: 5 for yr in cls.handyvars.aeo_years}}, }} cls.compete_meas1 = {", "handyvars (object): Useful variables across the class. sample_measure_res (object): Sample residential measure data.", "\"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None,", "(competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all", "\"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2 #", "\"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": {", "\"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2, 0.5625), \"rate 3\": numpy.pmt(0.45,", "For first test case, verify correct adoption/competition scenario # keys for measure markets/savings/portfolio", "5, \"2010\": 5}, \"measure\": {\"2009\": 0.87, \"2010\": 0.87}}}, \"energy\": { \"total\": { \"baseline\":", "0.25, 0.25, 0.25]), \"2010\": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])}, \"payback (w/ energy and", "\"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"carbon\":", "the keys that correspond to # the 
dicts or unitary values that are", "run function on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify", "11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 69,", "{\"2009\": .35, \"2010\": .35}}, \"Commercial\": { \"Heating\": {\"2009\": .40, \"2010\": .40}, \"Cooling\": {\"2009\":", "numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07,", "for yr in cls.handyvars.aeo_years}}, }, \"demand\": { \"['AIA_CZ1', 'single family home', 'existing']\": {", "\"savings (total)\": { \"2009\": numpy.array([184, 173, 169, 194, 149]), \"2010\": numpy.array([194, 205, 219,", "2, 1.808018), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"carbon cost\":", "{ \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}, \"competed\": {", "costs)\": { \"2009\": numpy.array([ 0.2392344, 0.2347418, 0.2242152, 0.2659574, 0.2857143]), \"2010\": numpy.array([ 0.3344482, 0.3194888,", "energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure2 = { \"name\": \"sample", "total energy or carbon market/savings value. Attributes: a_run (object): Sample analysis engine object.", "microsegment keys that overlap with 'measures_demand' Measure objects. 
measure_master_msegs_out (dict): Master market microsegments", "\"2010\": .35}}, \"Commercial\": { \"Heating\": {\"2009\": .40, \"2010\": .40}, \"Cooling\": {\"2009\": .45, \"2010\":", "\"sample compete measure r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"],", "# heating and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master", "4.09}, \"payback (w/ energy costs)\": { \"2009\": 0.25, \"2010\": 0.33}, \"payback (w/ energy", "scaling\": 1}}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\": { \"2009\": [", "\"ccc\": { \"2009\": numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08, -1.161895e-08,", "= [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows = [[-10, 1, 1, 1, 1, 5, 7, 8],", "\"supply\", \"secondary\": \"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"], \"markets\": { \"Technical potential\":", "7\": -75}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "\"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key1: { \"stock\": { \"total\":", "{ \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}}, \"competed\": {", "\"original energy (competed and captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy", "\"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 51, \"2010\":", "20}, \"measure\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 10},", "energy cost benefits)\": { \"2009\": numpy.array([ -3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([", "\"2010\": 60}, 
\"efficient\": {\"2009\": 45, \"2010\": 45}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\":", "measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across the supply", "numpy.pmt(0.07, 2, 2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07, 2,", "and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on", "If the recursion has not yet reached the terminal/leaf node if isinstance(i, dict):", "\"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}}, \"carbon\": { \"total\":", "0.8128544), \"rate 6\": numpy.pmt(0.065, 2, 0.9103132), \"rate 7\": -0.5}, \"2010\": { \"rate 1\":", "on sample measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on", "1.29884336, 0.01356626, 7.20249116])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785,", "\"2010\": 2.227001}}, \"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\":", "36}, \"efficient\": {\"2009\": 0, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 18},", "{ \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}}, \"competed\": { \"baseline\": { \"2009\": 0,", "measure_instance = run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict = measure_instance.__dict__ def test_attributes(self): \"\"\"Compare object attributes to", "34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"cost\": { \"stock\": { \"total\":", "\"adjusted energy (competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key2: {", "numpy.array([ 6.943250, 5.057443, 
7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "\"2010\": 150}, \"efficient\": {\"2009\": 50, \"2010\": 100}}}, \"cost\": { \"stock\": { \"total\": {", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}, \"Max adoption", "numpy.array([11.0, 11.0, 10.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44,", "cls.adjust_key1: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "a residential sample measure. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across", "[5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\":", "point values. compete_meas4 (dict): Sample residential supply-side cooling measure 2. compete_meas5 (dict): Sample", "financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics,", "\"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) # Verify", "0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\": { \"total\": { \"baseline\":", "some array inputs.\"\"\" # Run the measure competition routine on sample demand-side measures", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max", "(w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.002333333, 0.002333333, -0.04935749, -0.04935749, -0.0802776]), \"2010\":", "\"2010\": numpy.array([ 0, 0.001808835, 1.920664])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "2, 0.2009346), numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\": { \"2009\": 
numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}},", "7]), \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\":", "engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # For first test case, verify", "all point values at terminal leaf nodes. ok_master_mseg_dist1 (dict): Sample measure master microsegment", "\"2010\": 15}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing", "34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "1, 1, 5, 7, 8], [-10, 14, 2, 3, 4], [-10, 0, 1,", "3. compete_meas_dist (dict): Alternative version of sample commercial supply-side lighting measure 1 including", "{\"2009\": numpy.array([ 0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.1133333, 0.08222222, 0.1488889, 0.09333333,", "-0.10}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\": {},", "metrics that should be generated given 'ok_master_mseg_dist1' with a residential sample measure. ok_out_dist2", "\"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}},", "cls.measures_demand = cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1 = { \"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary',", "markets associated with these primary market microsegments. Attributes: handyvars (object): Useful variables across", "\"primary\": [\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": None},", "master microsegment including energy, carbon, and energy/carbon cost arrays. 
ok_master_mseg_dist2 (dict): Sample measure", "\"2010\": numpy.array([5, 6, 7])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\":", "outputs. Attributes: handyvars (object): Useful variables across the class. sample_measure_res (object): Sample residential", "ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary'", "cls.compete_meas2 = { \"name\": \"sample compete measure c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\":", "\"rate 5\": 125, \"rate 6\": 10, \"rate 7\": 135}])}}, \"energy cost\": { \"residential\":", "-60, \"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75}, \"2010\": { \"rate", "\"cost savings (annual)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3,", "cls.compete_meas2 = { \"name\": \"sample compete measure r2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family", "\"baseline\": {\"2009\": 39.06682, \"2010\": 39.06682}, \"efficient\": {\"2009\": 26.04455, \"2010\": 26.04455}}, \"competed\": { \"baseline\":", "\"efficient\": {\"2009\": 15, \"2010\": 25}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\":", "\"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\":", "6.327488, 10.343948, 7.801544])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.255, 0.1350000, 0.2050000, 0.21,", "{} }}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing", "\"2009\": -0.04935749, \"2010\": -0.08611353}, \"ccc\": {\"2009\": -1.602415e-08, \"2010\": -1.111353e-08}, \"ccc (w/ energy cost", "2]), \"2010\": numpy.array([0, 1, 2])}}}, \"energy\": { \"total\": { 
\"baseline\": {\"2009\": 20, \"2010\":", "\"2010\": 20}, \"efficient\": { \"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\":", "# Record the output for the test run of the 'metric_update' # function", "**x) for x in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2]", "\"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1,", "and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure4 = { \"name\": \"sample measure 4\",", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975,", "\"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\":", "numpy.array([ 0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])},", "\"2009\": numpy.array([ 39.06682, 40.94604, 30.43499]), \"2010\": numpy.array([ 39.06682, 40.94604, 30.43499])}, \"efficient\": { \"2009\":", "\"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "3 cls.ok_product_lifetime = 6.2 cls.ok_life_ratio = 2 cls.ok_base_scost = 1 cls.ok_meas_sdelt = -1", "{\"2009\": 0, \"2010\": 0}}, \"adjusted energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\":", "# Test that valid input cashflows yield correct output payback values for idx,", "\"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", ".35, \"2010\": .35}}, \"Commercial\": { \"Heating\": {\"2009\": .40, \"2010\": .40}, \"Cooling\": {\"2009\": .45,", "\"name\": \"sample compete measure r2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\":", "\"2009\": 5, 
\"2010\": 5}, \"efficient\": { \"2009\": numpy.array( [0, 1, 2]), \"2010\": numpy.array(", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}, \"cost\": { \"stock\":", "4.555556, 5.647891, 5.501689, 4.543007]), \"2010\": numpy.array([ 4.882353, 7.108108, 6.327488, 10.343948, 8.181351])}, \"payback (w/", "-2.715319e-08, -2.355809e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.232209e-08, -9.117156e-08, -8.600937e-08,", "numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}, \"efficient\": { \"2009\": numpy.array([", "\"cost savings (total)\": {\"2009\": 10, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 10, \"2010\":", "\"2010\": 11.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\":", "\"mseg_adjust\": { \"contributing mseg keys and values\": {}, \"competed choice parameters\": {}, \"secondary", "different key names self.assertEqual(k, k2) # If the recursion has not yet reached", "[20.1, 18.7, 21.7, 21.2, 22.5])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\":", "{ \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": 15}}, \"competed\": {", "7\": -75}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None },", "Other financial metric values that should be generated given valid sample inputs. 
\"\"\"", "\"2010\": 40}, \"efficient\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\":", "{\"2009\": 8.5, \"2010\": 6}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24},", "11.0, 10.5])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\":", "range(0, len(i)): self.assertAlmostEqual(i[x], i2[x], places=2) # At the terminal/leaf node, formatted as a", "At the terminal/leaf node, formatted as a numpy array # (for input uncertainty", "instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Record the output for", "(LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\",", "\"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 16.04455, 17.29736, 10.29000]),", "22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.5, \"2010\": 11}}},", "\"2010\": 45}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\":", "cls.test_htcl_adj = { \"supply\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": {", "= [\"2009\", \"2010\"] cls.test_adopt_scheme = \"Max adoption potential\" cls.overlap_key = str( ('primary', 'AIA_CZ1',", "recursively traverse the dict self.dict_check(i, i2) # At the terminal/leaf node, formatted as", "self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure self.a_run.secondary_adj(", "\"\"\"Test output given commercial measure with point value inputs.\"\"\" # Initialize test measure", "\"residential\": { \"2009\": -150, \"2010\": -150}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon", "\"rate 6\": 100, \"rate 7\": 110}, \"2010\": { \"rate 1\": 50, \"rate 2\":", "{\"2009\": 15, 
\"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\":", "energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (competed and", "1, 2], [10, 4, 7, 8, 10], [-100, 0, 1]] cls.ok_out = [5.14,", "\"efficient\": {\"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\":", "numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1,", "5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 20,", "-0.02450490, -0.01934271, -0.01897398, -0.01418052]), \"2010\": numpy.array([ -0.02466428, -0.02853592, -0.02023954, -0.02715319, -0.02355809])}, \"cce (w/", "11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}]", "{ \"baseline\": { \"2009\": 51, \"2010\": numpy.array([36, 39, 48])}, \"efficient\": { \"2009\": 34,", "10.25874, 0.02119408]), \"2010\": numpy.array([ 2.227001, 10.25874, 0.02119408])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "\"2010\": 17.77300}, \"efficient\": {\"2009\": 8.886499, \"2010\": 8.886499}}, \"competed\": { \"baseline\": {\"2009\": 8.886499, \"2010\":", "\"measure\": 2}} cls.ok_master_mseg_dist3 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "commercial measure data. test_adopt_scheme (string): Sample consumer adoption scheme. ok_rate (float): Sample discount", "contributing microsegment keys that overlap with 'measures_demand' Measure objects. measure_master_msegs_out (dict): Master market", "\"sample compete measure c2 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"],", "data. ok_partitions (dict): Sample results partitioning fraction. 
ok_out (dict): Sample partitioned measure results", "{ \"Residential\": { \"Heating\": {\"2009\": .30, \"2010\": .30}, \"Cooling\": {\"2009\": .35, \"2010\": .35}},", "\"2009\": 0, \"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}}}", "0.5 cls.ok_csave = 50 cls.ok_ccostsave = 1 cls.ok_out_array = [ numpy.pmt(0.07, 6, -0.1837021),", "\"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\":", "110}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\":", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 22.22,", "\"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None},", "\"energy cost\": { \"residential\": { \"2009\": -200, \"2010\": -200}, \"commercial\": { \"2009\": None,", "\"total\": { yr: 10 for yr in cls.handyvars.aeo_years}, \"total affected\": { yr: 5", "\"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}},", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.5, \"2010\": 6}}}, \"energy\": { \"total\": {", "\"total\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}, \"competed\":", "{ \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])},", "\"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\":", "\"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200}, \"2010\": { \"rate 1\":", "# Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], 
self.ok_out_point_com[1]) # Verify test measure portfolio-level", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) #", "numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07,", "of supply-side Measure objects and associated contributing microsegment keys that overlap with 'measures_demand'", "\"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": numpy.array([22.22, 22.68, 20.11]), \"2010\": numpy.array([22.22,", "-100}, \"commercial\": { \"2009\": None, \"2010\": None}}}] # Adjust/finalize point value test measure", "{}, \"total\": {}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": {", "# Run the measure competition routine on sample demand-side measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1,", "0.3695652), \"rate 6\": numpy.pmt(0.065, 2, 0.4389671), \"rate 7\": -0.25}, \"2010\": { \"rate 1\":", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\":", "numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\":", "15, \"2010\": 15}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\":", "market microsegment key chain being tested. 
secnd_adj_key (string): Key used to link primary", "\"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7, 7.7,", "0, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0,", "elif isinstance(i, numpy.ndarray): self.assertTrue(type(i) == type(i2)) for x in range(0, len(i)): self.assertAlmostEqual(i[x], i2[x],", "numpy.pmt(0.25, 2, 0.3), \"rate 5\": numpy.pmt(0.15, 2, 0.3695652), \"rate 6\": numpy.pmt(0.065, 2, 0.4389671),", "\"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array([0, 1, 2]), \"2010\": numpy.array([0,", "{\"2009\": 15, \"2010\": 25}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35},", "{ \"2009\": 34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0,", "{\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"carbon\": { \"total\": {", "10, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\":", "\"2010\": numpy.array([ 2.59768671, 0.02713253, 14.40498233])}, \"efficient\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\":", "for tests. Attributes: sample_measure (dict): Sample residential measure #1. sample_measure2 (dict): Sample residential", "# Create Engine instance using test measure, run function on it engine_instance =", "\"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\":", "0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\":", "(dict): Sample unpartitioned measure results data. ok_partitions (dict): Sample results partitioning fraction. 
ok_out", "10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4 # Create Engine", "functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = { \"market_entry_year\": None,", "{ \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([1.00,", "# Create an Engine instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) #", "{ \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}},", "50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}}, \"cost\": { \"stock\": {", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\":", "0.4259346)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\":", "None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -200, \"2010\": -200}, \"commercial\":", "\"ccc\": { \"2009\": numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08, 5.350000e-08,", "None}, \"technology\": {\"primary\": [\"general service (CFL)\"], \"secondary\": None}, \"markets\": { \"Technical potential\": {", "\"anpv\": { \"stock cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}}, \"carbon\": { \"savings (total)\":", "26.04455}}, \"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, 
\"2010\": 6.511136}}}},", "15, \"2010\": 15}, \"efficient\": { \"2009\": 15, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\":", "5.2) # In this structure, k and k2 are the keys that correspond", "\"efficient\": { \"2009\": numpy.array( [15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\":", "19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}, \"efficient\": { \"2009\": numpy.array([ 6.511136,", "0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure c2\",", "{ \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 15, \"2010\": 15}},", "numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]), \"2010\":", "8.022273}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455,", "{ \"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\": 25.5, \"2010\": 18}},", "2, 0.2042254), \"rate 7\": -0.125}}}, \"energy cost\": { \"residential\": {\"2009\": None, \"2010\": None},", "{\"2009\": 10, \"2010\": 20}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40},", "\"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "\"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}, \"competed\":", "correctly calculates primary market shares and updates master microsegments for a series of", "\"Cooling\": {\"2009\": .35, \"2010\": .35}}, \"Commercial\": { \"Heating\": {\"2009\": .40, \"2010\": .40}, \"Cooling\":", "Demand-side subset of 'measures_all_dist'. measures_supply_dist (list): Supply-side subset of 'measures_all_dist'. 
measures_overlap1_dist (dict): List", "-440, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160,", "\"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "# Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test output", "\"2010\": numpy.array([ 1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])}, \"irr (w/ energy and carbon costs)\":", "test_compete_res_dist(self): \"\"\"Test outcomes given valid sample measures w/ some array inputs.\"\"\" # Run", "status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) #", "\"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array([20, 21, 22])}}, \"competed\":", "\"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.5, \"2010\": 11}}}, \"energy\":", "isinstance(i, dict): # Test that the dicts from the current keys are equal", "115}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 16}}, \"competed\": {", "25.5, \"2010\": numpy.array([18, 19.5, 24])}}, \"competed\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12,", "20, 12])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}, \"efficient\":", "0, \"2010\": numpy.array([16, 15, 13])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95},", "4.260683, 4.367373, 4.089454])}, \"payback (w/ energy costs)\": { \"2009\": numpy.array([ 0.2392344, 
0.2347418, 0.2242152,", "cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset aeo_years cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.sample_measure_res =", "\"2009\": 0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\":", "{ \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 0.865895571, \"2010\": 0.865895571}},", "{ \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": {", "{ \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.4345794), \"2010\":", "{ \"2009\": { \"rate 1\": -135, \"rate 2\": -140, \"rate 3\": -145, \"rate", "the dict that has missing content; this # value is given as a", "20, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10,", "cls.measures_all_dist) # Set information needed to finalize array test measure consumer # metrics", "7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}, \"carbon\": { \"total\":", "2, 0.1896552), \"rate 4\": numpy.pmt(0.25, 2, 0.3), \"rate 5\": numpy.pmt(0.15, 2, 0.3695652), \"rate", "numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])},", "1.591056), numpy.pmt(0.07, 2, 1.356014)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.346974), numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07,", "\"rate 6\": -160, \"rate 7\": -170}, \"2010\": { \"rate 1\": -135, \"rate 2\":", "2.23, \"2010\": 2.23}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 1.11,", "29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}}, \"competed\": { \"baseline\": { 
\"2009\": numpy.array([ 20.82975,", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455},", "microsegment keys that overlap with 'measures_demand_dist' Measure objects. measures_overlap2_dist (dict): List of demand-side", "}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07, 2,", "numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}}, \"cost\":", "\"sub-market scaling\": 1}}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector (LED)')):", "residential supply-side cooling measure 1. compete_meas3_dist (dict): Alternative version of sample residential supply-side", "# in the tuple is the key and the second item is the", "CommonMethods): \"\"\"Test 'compete_com_primary' and 'secondary_adj' functions. Verify that 'compete_com_primary' correctly calculates primary market", "for correct outputs given valid inputs.\"\"\" # Create an Engine instance using sample_measure", "\"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 39.06682, \"2010\":", "'measures_primary' objects. 
measures_all_dist (list): List of competing measures including some measures with array", "100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array(", "numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07,", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775,", "2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.113501, \"2010\": 1.113501}}, \"competed\": { \"baseline\": {\"2009\": 1.113501,", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 17.77300, \"2010\": 17.77300}, \"efficient\": {\"2009\": 8.886499,", "\"efficient\": { \"2009\": 20, \"2010\": numpy.array([10, 12, 14])}}, \"competed\": { \"baseline\": {\"2009\": 10,", "size, # zip_longest() will use the fill value created below as a #", "\"master_mseg\"] = self.ok_master_mseg_dist2 # Create Engine instance using test measure, run function on", "2.931068, 0.006743571])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 3.340502, 14.65534, 0.02890102]),", "anpv, irr, payback, and # cost of conserved energy/carbon outputs for ind, x", "10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": {", "7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5,", "\"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\":", "of # heating and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure", "correspond to # the dicts or unitary values that are found in i", "ok_master_mseg_dist1 (dict): Sample measure master microsegment including energy, carbon, and energy/carbon cost arrays.", "0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.34, 0.2466667, 0.2233333, 0.14, 
0.1833333])}, \"payback", "= cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist = { \"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1',", "21.11183}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 63.33550,", "5 for yr in cls.handyvars.aeo_years}}, }} cls.compete_meas1 = { \"name\": \"sample compete measure", "\"efficient\": {\"2009\": 5, \"2010\": 5}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "(competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure5 = { \"name\": \"sample measure", "4\": -105, \"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120}}}}] # Adjust/finalize", "numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.670251, 7.816181, 0.01637724]), \"2010\": numpy.array([", "10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0, \"2010\": 5}}},", "and values\": { cls.overlap_key: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure attributes are correctly initiated. Attributes: sample_measure (object):", "\"rate 3\": numpy.pmt(0.45, 2, 0.8739596), \"rate 4\": numpy.pmt(0.25, 2, 1.08), \"rate 5\": numpy.pmt(0.15,", "0.33}, \"payback (w/ energy and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_dist1", "\"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\":", "a residential sample measure. 
ok_out_dist1 (dict): Measure attribute update status, savings, and portfolio/consumer-level", "\"rate 4\": 110, \"rate 5\": 115, \"rate 6\": 120, \"rate 7\": 125}, {", "8.5, \"2010\": numpy.array([6, 6.5, 8])}}, \"competed\": { \"baseline\": { \"2009\": 8.5, \"2010\": numpy.array([6.0,", "\"commercial\": {\"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2,", "home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None},", "{ \"nested key 1\": [1, 2, 3, 4, 5], \"nested key 2\": 5},", "{ \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "# metrics consumer_metrics_dist = [{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\":", "7\": -0.25}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4318182), \"rate 2\": numpy.pmt(1.0, 2,", "120, \"rate 5\": 125, \"rate 6\": 10, \"rate 7\": 135}])}}, \"energy cost\": {", "\"rate 1\": 50, \"rate 2\": 60, \"rate 3\": 70, \"rate 4\": 80, \"rate", "6.511136, \"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "2.227001}, \"efficient\": {\"2009\": 1.670251, \"2010\": 1.670251}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501},", "in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for x in [", "one sample residential measure. ok_cashflows (list): Set of sample input cash flows. 
ok_out", "test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using", "secondary microsegment adjustments on sample measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check", "\"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\":", "\"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17.77, \"2010\":", "inputs. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\"", "{ \"baseline\": { \"2009\": numpy.array([ 1.670251, 7.32767, 0.01445051]), \"2010\": numpy.array([ 1.670251, 7.32767, 0.01445051])},", "4\": -60, \"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75}, \"2010\": {", "\"2010\": 15}, \"measure\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"energy\": { \"total\":", "(grid)', 'lighting', 'reflector (LED)', 'existing')) cls.overlap_key_scnd = str( ('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)',", "\"2010\": numpy.array([36, 39, 48])}, \"efficient\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}}, \"competed\":", "zip() and zip_longest() produce tuples for the items # identified, where in the", "4])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 40,", "20}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([16, 15, 13])}}, \"competed\": { \"all\": {\"2009\":", "carbon costs)\": { \"2009\": 4.54, \"2010\": 4.09}, \"payback (w/ energy costs)\": { \"2009\":", "array inputs. measures_secondary_dist (list): Subset of 'measures_all_dist' with secondary microsegments to adjust. 
a_run_dist", "{\"2009\": 31.66775, \"2010\": 31.66775}}, \"competed\": { \"baseline\": {\"2009\": 21.11183, \"2010\": 21.11183}, \"efficient\": {\"2009\":", "\"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}},", "-9.523954e-08, -1.021532e-07, -9.855809e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([", "7.495183])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 41.65950, 30.34466, 44.97110]), \"2010\":", "\"measure\": 1}}, { \"stock\": { \"total\": { \"all\": { \"2009\": 30, \"2010\": 30},", "{\"2009\": 5, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\":", "\"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": {\"2009\": -0.01602415, \"2010\": -0.01111353},", "sample measure object. attribute_dict (dict): Dict of sample measure attributes. \"\"\" @classmethod def", "-0.1140346, -0.11474490, -0.09371098, -0.072742925, -0.11206083])}, \"ccc\": { \"2009\": numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08,", "{\"2009\": 11.5, \"2010\": 11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44},", "0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\":", "}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"energy cost\": {", "numpy.array([ 1.670251, 7.816181, 0.01637724]), \"2010\": numpy.array([ 1.670251, 7.816181, 0.01637724])}}, \"competed\": { \"baseline\": {", "avoided carbon emissions. ok_ccostsave (int): Sample measure avoided carbon costs. 
ok_out_dicts (list): Output", "for x, y in zip([ tested_data[\"key 1\"][\"nested key 1\"], tested_data[\"key 1\"][\"nested key 2\"],", "\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\":", "5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30,", "1}, \"measure\": 1}, \"sub-market scaling\": 1}, cls.overlap_key_scnd: { \"stock\": { \"total\": { \"all\":", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\":", "measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 63.33550, 64.02682, 60.16002]), \"2010\": numpy.array([ 63.33550, 64.02682,", "\"\"\"Ensure that measure attributes are correctly initiated. 
Attributes: sample_measure (object): Residential sample measure", "\"efficient\": { \"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": { \"2009\": 0.865895571, \"2010\":", "100}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\":", "80, \"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110}, \"2010\": { \"rate", "\"2010\": 90}, \"efficient\": {\"2009\": 60, \"2010\": 60}}, \"competed\": { \"baseline\": {\"2009\": 45, \"2010\":", "energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.compete_meas5 = { \"name\": \"sample compete", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([17.77, 10.23, 19.98]), \"2010\": numpy.array([17.77,", "2.6])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])},", "1}, \"measure\": 2}} cls.ok_master_mseg_dist3 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251,", "\"2010\": 5}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "\"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.113501, \"2010\": 1.113501}}, \"competed\": { \"baseline\":", "self.ok_partitions, self.ok_total) dict2 = self.ok_out self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation", "as a tuple to be of comparable structure # to the normal output", "Measure objects. a_run (object): Analysis engine object incorporating all 'measures_all' objects. 
measures_all_dist (list):", "\"2009\": numpy.array([ 0.03566667, 0.03566667, -0.01602415, -0.01602415, -0.04694426]), \"2010\": numpy.array([ 0.05350000, 0.05350000, -0.01111353, -0.01111353,", "15.21750])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.02227, 13.64868, 10.14500]), \"2010\": numpy.array([ 13.02227,", "{ cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (total captured)\": { cls.secnd_adj_key: {\"2009\":", "\"measure\": {\"2009\": 0, \"2010\": 8}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\":", "15, 9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 16.04, \"2010\": 16.04}},", "{\"primary\": [\"electricity (grid)\"], \"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\":", "7\": -400}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", "to adjust. a_run (object): Analysis engine object incorporating all 'measures_primary' objects. 
measures_all_dist (list):", "27.77300}, \"efficient\": {\"2009\": 20.82975, \"2010\": 20.82975}}, \"competed\": { \"baseline\": {\"2009\": 13.88650, \"2010\": 13.88650},", "\"baseline\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}, \"efficient\":", "1, -0.27), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 5, 2.837211)]), \"2010\": numpy.array([", "7.816181, 0.01637724]), \"2010\": numpy.array([ 1.670251, 7.816181, 0.01637724])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "\"2010\": numpy.array([2.23, 9.77, 0.02])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {", "home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2 = { \"measures\": cls.measures_all[0:2], \"keys\":", "metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3])", "1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20},", "{ \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\": {", "-160, \"rate 7\": -170}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\":", "39.06682, 40.94604, 30.43499])}, \"efficient\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455,", "competing residential measures; and that 'htcl_adj' properly accounts for heating and cooling supply-demand", "cls.adjust_key1: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary", "2\": 100, \"rate 3\": 105, \"rate 4\": 110, \"rate 5\": 115, \"rate 6\":", "energy costs)\": { \"2009\": numpy.array([ 3.648926, 3.737086, 3.956335, 3.180956, 2.886001]), \"2010\": 
numpy.array([ 2.425032,", "16.04455, 17.29736, 10.29000]), \"2010\": numpy.array([ 16.04455, 17.29736, 10.29000])}, \"efficient\": { \"2009\": numpy.array([ 8.022273,", "sample string for competed demand-side and supply-side market microsegment key chain being tested.", "\"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"cost\":", "{\"2009\": 10, \"2010\": 15}}, \"carbon\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings", "\"efficient\": {\"2009\": 46, \"2010\": 44}}, \"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\":", "{ \"2009\": None, \"2010\": numpy.array([ { \"rate 1\": 85, \"rate 2\": 90, \"rate", "{}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas1_dist = {", "\"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"carbon\": { \"total\":", "adoption potential\": { \"uncompeted\": False, \"competed\": True}}, \"consumer metrics\": False}, { \"stock\": {", "\"2010\": -0.10}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\":", "15}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\":", "self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on sample supply-side measures self.a_run_dist.compete_res_primary(", "\"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ -0.01565543,", "cls.compete_meas5 = { \"name\": \"sample compete measure r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family", "\"all\": {\"2009\": 10, \"2010\": 20}, \"measure\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"all\":", "self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) #", "supply-side Measure 
objects and associated contributing microsegment keys that overlap with 'measures_demand' Measure", "\"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "including measure lifetime array. ok_master_mseg_dist4 (dict): Sample measure master microsegment including stock cost", "be compared dict2 (dict): Second dictionary to be compared Raises: AssertionError: If dictionaries", "financial metrics that should be generated given 'ok_master_mseg_dist2' with a residential sample measure.", "Subset of 'measures_all_dist' with secondary microsegments to adjust. a_run_dist (object): Analysis engine object", "cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1 = { \"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single", "for adopt_scheme in self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())),", "\"efficient\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}}, \"competed\": { \"baseline\": { \"2009\":", "10, \"2010\": 10}, \"Cooling\": {\"2009\": 15, \"2010\": 15}}, \"Commercial\": { \"Heating\": {\"2009\": 20,", "test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify test measure portfolio-level financial metrics", "of common sample measures for tests. 
Attributes: sample_measure (dict): Sample residential measure #1.", "12, 14])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10,", "18.7, 21.7, 21.2, 22.5])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40},", "5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([1.11, 4.89, 0.01]), \"2010\": numpy.array([1.11, 4.89, 0.01])}}},", "\"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\":", "\"stock cost\": { \"residential\": { \"2009\": 100, \"2010\": 100}, \"commercial\": { \"2009\": None,", "2, 0.4259346)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": {", "17.77, \"2010\": 17.77}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.89,", "numpy.array([33, 33, 31.5])}}, \"competed\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])},", "0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}}} cls.compete_meas3 = {", "\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\":", "{ \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "energy and carbon costs)\": {\"2009\": numpy.array([ 0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([", "\"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\":", "{\"2009\": 60, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\":", "10}, \"measure\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"energy\": { \"total\": {", "6.5, 8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "function converts terminal/leaf node lists in a dict to 
numpy arrays. Attributes: handyvars", "\"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}, \"efficient\": { \"2009\":", "measure consumer # metrics consumer_metrics_dist = [{ \"stock cost\": { \"residential\": { \"2009\":", "\"supply-demand adjustment\": { \"savings\": { cls.adjust_key1: { \"2009\": 0, \"2010\": 0}}, \"total\": {", "\"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array( [5, 6, 7]),", "26.04455}, \"efficient\": {\"2009\": 19.53341, \"2010\": 19.53341}}, \"competed\": { \"baseline\": {\"2009\": 13.02227, \"2010\": 13.02227},", "\"efficient\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}}, \"competed\":", "\"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\": 25.5, \"2010\": 18}}, \"competed\": { \"baseline\":", "\"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}}, \"lifetime\":", "\"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\":", "number of competed units. ok_base_life (int): Sample baseline technology lifetime. ok_product_lifetime (float): Sample", "ok_scostsave (int): Sample baseline->measure stock cost delta. 
ok_esave (int): Sample measure energy savings.", "demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) # Run", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 17.77300, 10.22977, 19.98073]), \"2010\": numpy.array([ 17.77300, 10.22977,", "{ \"total\": { \"baseline\": { \"2009\": 69, \"2010\": numpy.array([66, 66, 63])}, \"efficient\": {", "{ \"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 95,", "-0.185), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\": { \"2009\":", "for a series of competing residential measures; and that 'htcl_adj' properly accounts for", "\"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"cost\": { \"stock\": { \"total\": {", "\"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-50,", "numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\":", "(total captured)\": {}, \"original energy (competed and captured)\": {}, \"adjusted energy (total captured)\":", "a_run_dist (object): Analysis engine object incorporating all 'measures_primary_dist' objects. 
measures_overlap (dict): List of", "6, 51]), \"2010\": numpy.array([106, 95, 81, 11, 124])}}, \"competed\": { \"baseline\": {\"2009\": 100,", "given sample measures w/ point value inputs.\"\"\" # Run measure competition routine on", "11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "1.2, 2.1, 2.2, 4.6])}} cls.ok_out_point_res = [{ \"savings and portfolio metrics\": { \"Technical", "0.2392344, 0.2347418, 0.2242152, 0.2659574, 0.2857143]), \"2010\": numpy.array([ 0.3344482, 0.3194888, 0.3533569, 0.3472222, 0.3636364])}, \"payback", "\"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\":", "that overlap with 'measures_supply' Measure objects. a_run (object): Analysis engine object incorporating all", "-5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([-5.1,", "\"cost savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": { \"2009\":", "metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3])", "m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5),", "= self.ok_master_mseg_dist2 # Create Engine instance using test measure, run function on it", "test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify test measure consumer-level", "measures self.a_run.compete_res_primary( self.measures_supply, 
self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across the supply", "10.55592, \"2010\": 10.55592}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 63.33550, \"2010\": 63.33550}, \"efficient\":", "219, 289, 176])}, \"savings (annual)\": { \"2009\": numpy.array([94, 93, 99, 84, 99]), \"2010\":", "-400, \"2010\": -400}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\":", "5\": numpy.pmt(0.15, 2, 1.625709), \"rate 6\": numpy.pmt(0.065, 2, 1.820626), \"rate 7\": -1}, \"2010\":", "\"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}}, \"carbon\": { \"total\":", "{ \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"cost\": { \"stock\": { \"total\": {", "-1}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2, 0.5625),", "cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life =", "-2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\":", "{\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 40, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\":", "be generated given valid sample inputs. ok_out_array (list): Other financial metric values that", "4.6])}} cls.ok_out_point_res = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\":", "r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\":", "\"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}}, \"energy\":", "microsegments to adjust. a_run_dist (object): Analysis engine object incorporating all 'measures_primary_dist' objects. 
measures_overlap", "test_compete_com_dist(self): \"\"\"Test outcomes given valid sample measures w/ some array inputs.\"\"\" # Run", "\"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"cost\": { \"stock\":", "(object): Analysis engine object incorporating all 'measures_primary' objects. measures_all_dist (list): List of competing", "[{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\":", "numpy.array([6, 5, 3])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10,", "[0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20},", "-1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc", "(int): Sample baseline technology lifetime. ok_product_lifetime (float): Sample measure lifetime. ok_life_ratio (int): Sample", "0, 999] def test_cashflow_paybacks(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" # Create", "be generated given 'ok_master_mseg_dist2' with a residential sample measure. ok_out_dist3 (dict): Measure attribute", "{ \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": {", "-150, \"rate 7\": -400}, \"2010\": { \"rate 1\": -350, \"rate 2\": -60, \"rate", "\"rate 3\": 120, \"rate 4\": 130, \"rate 5\": 140, \"rate 6\": 150, \"rate", "{ \"2009\": { \"rate 1\": -40, \"rate 2\": -50, \"rate 3\": -55, \"rate", "12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}, \"competed\": { \"baseline\": {\"2009\": 8.5, \"2010\": 6},", "chain being tested. compete_meas1 (dict): Sample residential demand-side cooling measure 1. 
compete_meas1_dist (dict):", "len(i)): self.assertAlmostEqual(i[x], i2[x], places=2) # At the terminal/leaf node, formatted as a point", "key and the second item is the value; # in the case where", "the dicts or unitary values that are found in i and i2, #", "0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) #", "cost\": { \"residential\": { \"2009\": numpy.array([95, 100, 90]), \"2010\": numpy.array([95, 100, 90])}, \"commercial\":", "d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary' and", "\"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\":", "{ \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": { yr: 10 for yr", ".10, \"2010\": .10}, \"Cooling\": {\"2009\": .15, \"2010\": .15}}, \"Commercial\": { \"Heating\": {\"2009\": .20,", "-150, \"2010\": -150}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\":", "home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"],", "{ \"2009\": numpy.array([ 4.713113, 4.884221, 5.309580, 2.908860, 5.394281]), \"2010\": numpy.array([ 4.601286, 4.897553, 4.260683,", "40, \"2010\": 40}, \"efficient\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20,", "\"2010\": 95}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": {", "where the dicts are not of identical size, # zip_longest() will use the", "sample inputs. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class", "{ yr: 5 for yr in cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for", "{ \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])},", "r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\":", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 22.22366, 22.68455, 20.10668]), \"2010\": numpy.array([ 22.22366,", "energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}},", "\"2009\": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}},", "the first item # in the tuple is the key and the second", "numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07, 2, 1.356014)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.346974), numpy.pmt(0.07, 2,", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 17.77300, 10.22977, 19.98073]), \"2010\": numpy.array([ 17.77300,", "13])}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([8, 9, 9.1])}}, \"competed\": { \"baseline\": {", "1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key1: {", "objects. 
measures_overlap2_dist (dict): List of demand-side Measure objects and associated contributing microsegment keys", "\"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\":", "heating and cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments", "15, \"2010\": 15}, \"measure\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"energy\": {", "10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\":", "= 0.5 cls.ok_csave = 50 cls.ok_ccostsave = 1 cls.ok_out_array = [ numpy.pmt(0.07, 6,", "-40, \"rate 2\": -50, \"rate 3\": -55, \"rate 4\": -60, \"rate 5\": -65,", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -40, \"rate 2\": -50, \"rate", "20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}}, \"competed\": { \"baseline\": { \"2009\":", "\"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.adjust_key2: {", "{ \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": {", "\"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\":", "\"efficient\": {\"2009\": 27.77300, \"2010\": 27.77300}}, \"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\":", "numpy.pmt(0.07, 2, 0.9040091), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}}, \"irr", "{\"2009\": numpy.array([ 0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.1700000, 0.1233333, 0.2233333, 0.1400000,", "6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\":", "residential sample measure. 
ok_out_dist3 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial", "\"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4 = { \"stock\": {", "0.4]}}, cls.overlap_key_scnd: { \"rate distribution\": {}}}, \"secondary mseg adjustments\": { \"market share\": {", "self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify", "11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46,", "\"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200}}}, \"carbon cost\": { \"residential\":", "\"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"carbon\": { \"total\":", "0.9040091), numpy.pmt(0.07, 5, 2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07,", "updates master microsegments for a series of competing residential measures; and that 'htcl_adj'", "{ \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\": {", "energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}, \"supply-demand adjustment\":", "-200}, \"2010\": { \"rate 1\": -190, \"rate 2\": -195, \"rate 3\": -190, \"rate", "costs)\": {\"2009\": numpy.array([ 3.370236, 6.877566, 4.335205, 4.218185, 3.081800]), \"2010\": numpy.array([ 5.345834, 7.580577, 3.931585,", "values that should be generated given valid sample inputs. 
\"\"\" @classmethod def setUpClass(cls):", "31.5])}}, \"competed\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": {", "Check updated competed master microsegments for each sample measure # following competition/supply-demand overlap", "measures self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure", "key names self.assertEqual(k, k2) # If the recursion has not yet reached the", "51]), \"2010\": numpy.array([106, 95, 81, 11, 124])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\":", "\"baseline\": { \"2009\": numpy.array([ 1.670251, 7.32767, 0.01445051]), \"2010\": numpy.array([ 1.670251, 7.32767, 0.01445051])}, \"efficient\":", "\"rate 5\": numpy.pmt(0.15, 2, 0.3695652), \"rate 6\": numpy.pmt(0.065, 2, 0.4389671), \"rate 7\": -0.25},", "numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}, \"efficient\": { \"2009\": numpy.array([", "numpy.array([106, 95, 81, 11, 124])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\":", "self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments for each sample", "= [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True, \"competed\":", "self.ok_out_dist3[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\":", "{ \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": {", "market ('ok_master_mseg_point'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) 
test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][", "set of sample cash flows. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use", "\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": 5}}, \"competed\": { \"baseline\":", "\"secondary\": [\"general service (LED)\"]}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": {", "1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}}, \"competed\": { \"baseline\": { \"2009\":", "objects/variables for use across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir,", "\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\":", "0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist = { \"name\": \"sample compete measure c2 dist\", \"climate_zone\":", "\"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": \"supply\"}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\",", "{\"2009\": 0, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10,", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.89, \"2010\": 8.89}}}, \"energy\": { \"total\":", "\"irr (w/ energy costs)\": {\"2009\": numpy.array([ 3.370236, 6.877566, 4.335205, 4.218185, 3.081800]), \"2010\": numpy.array([", "savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\":", "self.assertAlmostEqual(i[x], i2[x], places=2) # At the terminal/leaf node, formatted as a point value", "10}, \"measure\": {\"2009\": 0, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5},", "\"\"\"Test outcomes given valid sample measures w/ point value inputs.\"\"\" # Run the", "0.005, -0.13, 7.7e-10, -9.2e-9] def test_metric_updates(self): \"\"\"Test for correct outputs given valid inputs.\"\"\"", "\"2009\": 0, 
\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\":", "40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}},", "with lists to convert. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across", "\"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": {\"2009\":", "{\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 50, \"2010\": 100}}}, \"cost\": { \"stock\": {", "{ \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1,", "numpy.array([ -0.01145724, -0.01084246, -0.01014934, -0.007691022, -0.01262901])}, \"cce (w/ carbon cost benefits)\": { \"2009\":", "\"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\":", "\"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "Sample residential supply-side cooling measure 1. compete_meas3_dist (dict): Alternative version of sample residential", "values. compete_meas2 (dict): Sample residential demand-side cooling measure 2. 
compete_meas3 (dict): Sample residential", "\"Max adoption potential\" cls.overlap_key = str( ('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'lighting', 'reflector", "\"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\": {\"2009\":", "14.40498233]), \"2010\": numpy.array([ 2.59768671, 0.02713253, 14.40498233])}, \"efficient\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]),", "def test_ok(self): \"\"\"Test for correct function output given valid inputs.\"\"\" dict1 = self.a_run.out_break_walk(", "keys and values\": {}, \"competed choice parameters\": {}, \"secondary mseg adjustments\": { \"market", "# metrics consumer_metrics_final_dist = [{ \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\":", "\"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 0, \"2010\":", "{ \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0, \"2010\": 5}}}, \"energy\": {", "\"2009\": { \"rate 1\": -135, \"rate 2\": -140, \"rate 3\": -145, \"rate 4\":", "\"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\":", "generated given valid sample inputs. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use", "\"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 5}}}, \"cost\":", "and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist1' with a residential", "self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key,", "\"2010\"] cls.handyvars.retro_rate = 0 cls.test_adopt_scheme = \"Max adoption potential\" cls.adjust_key1 = str( ('primary',", "\"total\": { \"baseline\": {\"2009\": 3.340502, \"2010\": 3.340502}, \"efficient\": {\"2009\": 2.227001, \"2010\": 2.227001}}, \"competed\":", "measure 2. compete_meas3 (dict): Sample residential supply-side cooling measure 1. compete_meas3_dist (dict): Alternative", "\"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"F32T8\"], \"secondary\": None}, \"markets\": { \"Technical", "supply and demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj)", "{\"2009\": 0, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\":", "\"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.29884336, 0.01356626,", "0}}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": {", "0.2750000]), \"2010\": numpy.array([ 0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])}, \"payback (w/ energy and carbon", "{ \"2009\": 100, \"2010\": 100}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\":", "0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0,", "and dict2, respectively for (k, i), 
(k2, i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): #", "45, 61, 5, 54])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300},", "\"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.test_adopt_scheme =", "and demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) #", "lists of energy/carbon and associated cost input values instead of point values. compete_meas2", "objects/variables for use across all class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir,", "w/ point value inputs.\"\"\" # Run the measure competition routine on sample demand-side", "{ \"baseline\": {\"2009\": 51, \"2010\": 36}, \"efficient\": {\"2009\": 34, \"2010\": 24}}, \"competed\": {", "adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample", "{\"2009\": 8.886499, \"2010\": 8.886499}}, \"competed\": { \"baseline\": {\"2009\": 8.886499, \"2010\": 8.886499}, \"efficient\": {\"2009\":", "100, \"rate 3\": 105, \"rate 4\": 110, \"rate 5\": 115, \"rate 6\": 120,", "{\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"cost\": { \"stock\": {", "measures; and that 'secondary_adj' correctly adjusts any secondary markets associated with these primary", "\"2010\": numpy.array([18, 19.5, 24])}}, \"competed\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13,", "11}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\":", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07,", "10, 6])}, \"efficient\": { \"2009\": 0, \"2010\": 
numpy.array([6, 5, 3])}}}, \"carbon\": { \"total\":", "{ \"rate 1\": 100, \"rate 2\": 110, \"rate 3\": 120, \"rate 4\": 130,", "5.144998])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": {", "{ \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"carbon\": {", "numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/", "{\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": {", "\"baseline\": {\"2009\": 8.5, \"2010\": 6}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1 # Create Engine", "(object): Useful variables across the class. test_adopt_scheme (string): Sample consumer adoption scheme. 
test_htcl_adj", "'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2 = { \"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary',", "'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single", "-0.25}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4318182), \"rate 2\": numpy.pmt(1.0, 2, -0.125),", "# the keys are equal; this should fail if one of the dicts", "Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify test measure portfolio-level financial", "Instantiate engine object based on above measures cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, cls.overlap_key_scnd: { \"stock\": {", "results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"],", "0.01445051]), \"2010\": numpy.array([ 1.670251, 7.32767, 0.01445051])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]),", "{ \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 0, \"2010\": 50}}}, \"carbon\": {", "-1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc (w/ energy cost", "with 'measures_supply_dist' Measure objects. a_run_dist (object): Engine object incorporating all 'measures_all_dist' objects. 
measure_master_msegs_out", "{ \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete", "0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44, 44,", "20, \"2010\": 20}, \"efficient\": { \"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {", "measures w/ some array inputs.\"\"\" # Run the measure competition routine on sample", "test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify test measure consumer-level", "0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "11}}, \"competed\": { \"baseline\": {\"2009\": 11.5, \"2010\": 11}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "15}, \"measure\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"energy\": { \"total\": {", "\"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\":", "input values instead of point values. 
compete_meas2 (dict): Sample residential demand-side cooling measure", "compete_meas_dist (dict): Alternative version of sample commercial supply-side lighting measure 1 including lists", "1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": { \"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": {", "carbon cost benefits)\": { \"2009\": -0.04935749, \"2010\": -0.08611353}, \"ccc\": {\"2009\": -1.602415e-08, \"2010\": -1.111353e-08},", "cls.ok_meas_sdelt = -1 cls.ok_esave = 7.5 cls.ok_ecostsave = 0.5 cls.ok_csave = 50 cls.ok_ccostsave", "11.5, \"2010\": 11}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 23, \"2010\":", "numpy.array([12, 10, 6])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"carbon\": {", "20}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}},", "0.02713253, 14.40498233])}, \"efficient\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835,", "numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\": { \"2009\": numpy.repeat(None,", "None}}}] # Adjust/finalize point value test measure consumer metrics for ind, m in", "__init__(self): self.sample_measure = { \"name\": \"sample measure 1\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\":", "\"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\":", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\": 0,", "(list): List for Engine including one sample residential measure. 
ok_cashflows (list): Set of", "\"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"cost\":", "{ \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"cost\": {", "\"competed\": True}}, \"consumer metrics\": False}, { \"stock\": { \"cost savings (total)\": {\"2009\": -5,", "numpy.array([2.23, 9.77, 0.02]), \"2010\": numpy.array([2.23, 9.77, 0.02])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\":", "a sample 'uncompeted' # market ('ok_master_mseg_dist3'), the focus of this test suite test_meas", "3, 4], [-10, 0, 1, 2], [10, 4, 7, 8, 10], [-100, 0,", "0.019795, -0.02023954, -0.02715319, -0.05525120])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.003046667,", "it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # For first test case,", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "contributing microsegment keys that overlap with 'measures_demand_dist' Measure objects. 
measures_overlap2_dist (dict): List of", "10}, \"measure\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5},", "given valid sample measures w/ some array inputs.\"\"\" # Run the measure competition", "\"measure\": {\"2009\": 11.11, \"2010\": 11.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\":", "\"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"cost\": { \"stock\": { \"total\":", "[{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17,", "portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[", "and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 =", "of all competing/interacting sample Measure objects with point value inputs. 
measures_demand (list): Demand-side", "{ \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"cost\": {", "yield correct anpv, irr, payback, and # cost of conserved energy/carbon outputs for", "20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 15,", "22.48555])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}},", "{ \"contributing mseg keys and values\": {}, \"competed choice parameters\": {}, \"secondary mseg", "{ \"cost savings (total)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1,", "metrics\": False}, { \"stock\": { \"cost savings (total)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1,", "{\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4090909),", "one of the dicts # is empty, is missing section(s), or has different", "\"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([2.23, 9.77, 0.02]),", "{ \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([-150, -200,", "1.670251, 7.32767, 0.01445051])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503,", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -135, \"rate 2\": -140, \"rate", "\"rate 6\": -230, \"rate 7\": -200}}}, \"carbon cost\": { \"residential\": { \"2009\": None,", "\"measure\": {\"2009\": 0, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\":", "= run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars, **sample_measure)] cls.a_run = run.Engine(handyvars,", "numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 41.65950,", 
"microsegment key chain being tested. adjust_key2 (string): Second sample string for competed demand-side", "\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -435, \"rate 2\":", "\"baseline\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}, \"efficient\": { \"2009\": 0, \"2010\":", "test measure consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_dist[ind] cls.measures_master_msegs_out", "numpy.pmt(10.0, 2, 0.04958678), \"rate 2\": numpy.pmt(1.0, 2, 0.375), \"rate 3\": numpy.pmt(0.45, 2, 0.5826397),", "PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'calc_savings_metrics' function. Verify that measure master", "{\"2009\": 11.5, \"2010\": 11}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "\"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([36, 30, 18])}, \"efficient\": { \"2009\":", "\"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 1.11, \"2010\": 1.11}}}, \"energy\":", "numpy.array([ 26.04455, 27.29736, 20.29000])}, \"efficient\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([", "2, 0.3845794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07, 2, 0.3659346),", "{\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}, \"competed\": { \"baseline\": {\"2009\":", "\"adjusted energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}, \"supply-demand", "ok_cashflows (list): Set of sample input cash flows. ok_out (list): Outputs that should", "numpy.array([ -0.09966428, -0.10353592, -0.09523954, -0.10215319, -0.09855809])}, \"ccc\": { \"2009\": numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08,", "(list): List including competing/interacting sample Measure objects with array inputs. 
measures_demand_dist (list): Demand-side", "numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"cost\": { \"stock\": {", "numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346)]) },", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\":", "numpy.array([ numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07, 2, -0.2169622),", "test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) # Verify test measure savings", "0, \"2010\": 0}}, \"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}}", "2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"cost\": { \"stock\": { \"total\": {", "0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.59768671, 0.02713253,", "6\": -115, \"rate 7\": -120}, \"2010\": { \"rate 1\": -90, \"rate 2\": -95,", "100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4 = { \"name\": \"sample compete measure r4\",", "\"2009\": numpy.array([ 2.227001, 10.25874, 0.02119408]), \"2010\": numpy.array([ 2.227001, 10.25874, 0.02119408])}}, \"competed\": { \"baseline\":", "residential measure with point value inputs.\"\"\" # Initialize test measure and assign it", "\"measure\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "{ \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}}, \"competed\": {", "\"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"cost\":", "use across all class functions.\"\"\" base_dir = os.getcwd() 
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure", "2.227001}}, \"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}},", "= run.Engine(handyvars, measure_list) cls.ok_total = {\"2009\": 100, \"2010\": 100} cls.ok_partitions = { \"AIA", "numpy.array( [25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}},", "#2. sample_measure3 (dict): Sample commercial measure #1. \"\"\" def __init__(self): self.sample_measure = {", "0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\":", "enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test outcomes given valid sample measures", "measure c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary", "(dict): Alternative version of sample commercial supply-side lighting measure 1 including lists stock", "90, \"2010\": 90}, \"efficient\": {\"2009\": 60, \"2010\": 60}}, \"competed\": { \"baseline\": {\"2009\": 45,", "test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) # Verify test measure savings", "11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]),", "(int): Sample measure avoided carbon emissions. ok_ccostsave (int): Sample measure avoided carbon costs.", "measures_supply (list): Supply-side subset of 'measures_all'. 
measures_overlap1 (dict): List of supply-side Measure objects", "{ \"Heating\": {\"2009\": .40, \"2010\": .40}, \"Cooling\": {\"2009\": .45, \"2010\": .45}}}} cls.ok_out =", "numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}, \"efficient\": { \"2009\": numpy.array([", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\":", "[\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": [\"general service (LED)\"]}, \"markets\": { \"Technical", "'measures_demand' Measure objects. measures_overlap2 (dict): List of demand-side Measure objects and associated contributing", "[\"windows\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"],", "{ \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}},", "10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\":", "-3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([ -2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}}, {", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251, \"2010\":", "Measure objects. 
measures_overlap2 (dict): List of demand-side Measure objects and associated contributing microsegment", "None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\":", "numpy.array([ -0.01306317, -0.01389378, -0.01422262, -0.01238981, -0.01613170]), \"2010\": numpy.array([ -0.01145724, -0.01084246, -0.01014934, -0.007691022, -0.01262901])},", "{ \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5,", "2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091)]), \"2010\": numpy.array([", "measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify test measure consumer-level metrics", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114,", "numpy.array([36, 30, 18])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}}, \"competed\": {", "16.04, \"2010\": 16.04}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.02,", "cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist = { \"name\": \"sample", "\"2010\": 8}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\":", "2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": {", "\"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key1: { \"b1\": {\"2009\":", "1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.overlap_key:", "2.400830388])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 
0.01808835,", "self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments for each sample measure", "cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\":", "self.ok_out_dist3[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify test measure", "cost of conserved energy/carbon outputs for ind, x in enumerate(self.ok_out_array): if x is", "List of all competing/interacting sample Measure objects with point value inputs. measures_demand (list):", "10.55592, \"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist =", "\"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\":", "metric values that should be generated given valid sample inputs. 
\"\"\" @classmethod def", "30, 18])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}}, \"competed\": { \"baseline\":", "1.44), \"rate 5\": numpy.pmt(0.15, 2, 1.625709), \"rate 6\": numpy.pmt(0.065, 2, 1.820626), \"rate 7\":", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\": 25.5,", "and cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments for", "\"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 0, \"2010\": 50}}}, \"carbon\":", "1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\":", "58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}, \"competed\": { \"baseline\":", "4.89, 0.01])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]),", "\"2009\": numpy.array([16.04, 17.30, 10.29]), \"2010\": numpy.array([16.04, 17.30, 10.29])}}, \"competed\": { \"all\": {\"2009\": 10,", "{ cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"original energy (competed and captured)\": { cls.secnd_adj_key:", "\"measure\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\":", "measure results data. ok_partitions (dict): Sample results partitioning fraction. 
ok_out (dict): Sample partitioned", "20.82975, \"2010\": 20.82975}}, \"competed\": { \"baseline\": {\"2009\": 13.88650, \"2010\": 13.88650}, \"efficient\": {\"2009\": 6.943250,", "-0.04452961, -0.05150073, -0.006204243, -0.09331291]), \"2010\": numpy.array([ -0.1140346, -0.11474490, -0.09371098, -0.072742925, -0.11206083])}, \"ccc\": {", "{ \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([-150, -200,", "\"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\":", "\"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\":", "sample commercial supply-side lighting measure 1 including lists stock cost input values instead", "numpy.array([8.02, 8.65, 5.14])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736,", "\"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20},", "\"nested key 1\": [1, 2, 3, 4, 5], \"nested key 2\": 5}, \"key", "function on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # For first", "and associated contributing microsegment keys that overlap with 'measures_supply_dist' Measure objects. a_run_dist (object):", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 2.59768671, 0.02713253, 14.40498233]), \"2010\": numpy.array([ 2.59768671, 0.02713253,", "\"efficient\": {\"2009\": 11.11183, \"2010\": 11.11183}}, \"competed\": { \"baseline\": {\"2009\": 11.11183, \"2010\": 11.11183}, \"efficient\":", "that should be generated given 'ok_master_mseg_dist4' with a residential sample measure. 
\"\"\" @classmethod", "13, 16])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 8.5,", "{ \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 2,", "\"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": {\"2009\": 0, \"2010\":", "{ \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251, \"2010\": 1.670251}}, \"competed\": {", "\"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}, \"efficient\":", "the measure competition routine on sample demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) #", "{ \"name\": \"sample compete measure r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\":", "{\"2009\": 8.89, \"2010\": 8.89}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300},", "\"2010\": numpy.array([ 0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])}, \"payback (w/ energy and carbon costs)\":", "**sample_measure)] cls.a_run = run.Engine(handyvars, measure_list) cls.ok_total = {\"2009\": 100, \"2010\": 100} cls.ok_partitions =", "0.9345794), numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 5, 4.100197)]),", "for x in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist", "status, savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist2' with", "\"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array([15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10,", "d in enumerate(self.a_run.measures): 
self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test outcomes given valid", "ratio. ok_base_scost (int): Sample baseline stock cost. ok_scostsave (int): Sample baseline->measure stock cost", "cls.measures_all) # Set information needed to finalize point value test measure # consumer", "\"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\":", "MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'metrics_update' function. Verify that cashflow inputs", "of sample residential demand-side cooling measure 1 including lists of energy/carbon and associated", "\"rate 4\": -380, \"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400}, \"2010\":", "demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) # Check", "lifetime array. ok_master_mseg_dist4 (dict): Sample measure master microsegment including stock cost and measure", "95, \"rate 4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}}},", "2, 1.346974), numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07, 2,", "0.21, 0.2750000]), \"2010\": numpy.array([ 0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])}, \"payback (w/ energy and", "{ \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"carbon cost\": { \"residential\": {", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\":", "0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'metrics_update' function. 
Verify", "13.88650, 10.11489, 14.99037]), \"2010\": numpy.array([ 13.88650, 10.11489, 14.99037])}, \"efficient\": { \"2009\": numpy.array([ 6.943250,", "numpy.array([-50, -100, -10])}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\": {", "# Set information needed to finalize array test measure consumer # metrics consumer_metrics_dist", "-230, \"rate 7\": -200}, \"2010\": { \"rate 1\": -190, \"rate 2\": -195, \"rate", "self.ok_out self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'calc_savings_metrics' function.", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\":", "21.11183, \"2010\": 21.11183}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"carbon\": { \"total\": { \"baseline\":", "2, 1.97074), numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07, 2,", "2\": -140, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\":", "'cooling', 'demand', 'windows', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand',", "200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([16, 27, 31, 6, 51]), \"2010\": numpy.array([106,", "1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95,", "5}, \"efficient\": { \"2009\": 5, \"2010\": numpy.array([ 0, 1, 2])}}}, \"energy\": { \"total\":", "5, 2.040408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": {", "captured)\": {}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\":", "20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10,", "{ \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -350, 
\"rate", "Record the output for the test run of the 'metric_update' # function function_output", "{ \"savings (total)\": { \"2009\": numpy.array([184, 173, 169, 194, 149]), \"2010\": numpy.array([194, 205,", "{ \"total\": { \"baseline\": { \"2009\": 10, \"2010\": numpy.array([16, 15, 13])}, \"efficient\": {", "\"2010\": 100}, \"cost savings (total)\": {\"2009\": 10, \"2010\": 15}, \"cost savings (annual)\": {\"2009\":", "\"2010\": 39.06682}, \"efficient\": {\"2009\": 26.04455, \"2010\": 26.04455}}, \"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\":", "Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify test measure results update status self.dict_check(engine_instance.measures[", "the dict self.dict_check(i, i2) # At the terminal/leaf node, formatted as a numpy", "\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\":", "100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": {", "\"2010\": 1.113501}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "tested. 
overlap_key_scnd (string): Second sample string for secondary market microsegment key chain being", "0, \"2010\": 0}}, \"adjusted energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0,", "all lines below this point in all # test files) def main(): \"\"\"Trigger", "\"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "\"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys", "\"stock cost\": { \"residential\": { \"2009\": 120, \"2010\": 120}, \"commercial\": { \"2009\": None,", "20}, \"measure\": {\"2009\": 0, \"2010\": 16}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10},", "First dictionary to be compared dict2 (dict): Second dictionary to be compared Raises:", "0.4245794), numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 2, 0.3845794)]),", "\"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}, \"carbon\": {", "0.04958678), \"rate 2\": numpy.pmt(1.0, 2, 0.375), \"rate 3\": numpy.pmt(0.45, 2, 0.5826397), \"rate 4\":", "2.44, 2.44, 2.99])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([2.00, 2.00, 4.54,", "\"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975, \"2010\": 20.82975}}, \"competed\": { \"baseline\": {\"2009\": 13.88650, \"2010\":", "ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary' and 'secondary_adj' functions. 
Verify that 'compete_com_primary' correctly calculates primary", "{ \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -150, \"2010\":", "the current keys are equal self.assertCountEqual(i, i2) # Continue to recursively traverse the", "\"2009\": numpy.array([ -0.0396936, -0.04452961, -0.05150073, -0.006204243, -0.09331291]), \"2010\": numpy.array([ -0.1140346, -0.11474490, -0.09371098, -0.072742925,", "\"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array( [15, 16, 17]),", "\"efficient\": {\"2009\": 20, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\":", "{ \"2009\": 4.54, \"2010\": 4.09}, \"payback (w/ energy costs)\": { \"2009\": 0.25, \"2010\":", "{ \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 2.23, \"2010\": 2.23}},", "supply-side lighting measure 3. compete_meas_dist (dict): Alternative version of sample commercial supply-side lighting", "\"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.9040091),", "update status, savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist4'", "compete measure c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\",", "\"energy cost\": { \"residential\": { \"2009\": -400, \"2010\": -400}, \"commercial\": { \"2009\": None,", "'windows', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]}", "measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_point'), the focus of", "0.87, \"2010\": 0.87}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\":", "\"2009\": numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]), \"2010\": numpy.array([199.4, 191.3, 194.9, 
195.0, 193.9])}, \"savings", "\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\":", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array(", "with supply-demand overlap data. adjust_key1 (string): First sample string for competed demand-side and", "found in i and i2, # respectively, at the current level of the", "12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}, \"cost savings (annual)\": {", "{ \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -200, \"2010\":", "9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 0.865895571, 0.01085301, 6.722325]),", "{ \"2009\": None, \"2010\": None}}}] # Adjust/finalize point value test measure consumer metrics", "List for Engine including one sample residential measure. ok_cashflows (list): Set of sample", "should be generated given 'ok_master_mseg_dist2' with a residential sample measure. 
ok_out_dist3 (dict): Measure", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114},", "{ \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}, \"efficient\": {", "\"rate 7\": -170}, \"2010\": { \"rate 1\": -135, \"rate 2\": -140, \"rate 3\":", "\"rate 4\": -205, \"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200}, \"2010\":", "('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing')) cls.test_htcl_adj =", "\"total\": { \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}, \"efficient\": { \"2009\":", "5.452729])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 1.941176, 4.555556, 5.647891, 5.501689,", "that correspond to # the dicts or unitary values that are found in", "numpy.array([8.0, 7.5, 6.5])}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 1.5, 2.6])}}}, \"energy\": {", "{\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"resistance", "0, \"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}}} cls.compete_meas3", "\"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\", \"cooling\"]}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\":", "{\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": {", "float])])) # Offer external code execution (include all lines below this point in", "\"commercial\": { \"2009\": { \"rate 1\": -350, \"rate 2\": -60, \"rate 3\": -70,", "\"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]),", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.89, \"2010\": 8.89}}}, \"energy\": {", "27, 31, 6, 51]), 
\"2010\": numpy.array([106, 95, 81, 11, 124])}}, \"competed\": { \"baseline\":", "\"2010\": 30}, \"efficient\": { \"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\":", "current location in the dict structure, # the keys are equal; this should", "\"2010\": 25}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\":", "[\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\":", "i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): # Confirm that at the current", "Run the measure competition routine on sample demand-side measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme)", "self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across the supply and demand", "13.8, 12.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\":", "108.7, 105.1, 105, 106.1])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10,", "-0.02715319, -0.05525120])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.003046667, -0.01407333, -0.05267604,", "Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist2'), the", "\"mseg_out_break\": {}}}} cls.compete_meas3_dist = { \"name\": \"sample compete measure r3 dist\", \"climate_zone\": [\"AIA_CZ1\"],", "numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}}, \"competed\": { \"baseline\": {", "\"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": numpy.array([11.11, 11.34, 10.05]), \"2010\": numpy.array([11.11,", "measure master microsegment including stock cost and measure lifetime array. 
ok_out_point_res (dict): Measure", "10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"carbon\": { \"total\": { \"baseline\": {", "{ \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array( [25.1,", "Attributes: a_run (object): Sample analysis engine object. ok_total (dict): Sample unpartitioned measure results", "\"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 100,", "2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5),", "2\": 10.8}, \"Max adoption potential\": { \"key 1\": { \"nested key 1\": [0.5,", "\"rate 4\": numpy.pmt(0.25, 2, 0.72), \"rate 5\": numpy.pmt(0.15, 2, 0.8128544), \"rate 6\": numpy.pmt(0.065,", "-190, \"rate 4\": -205, \"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200},", "numpy.array([ 41.65950, 30.34466, 44.97110]), \"2010\": numpy.array([ 41.65950, 30.34466, 44.97110])}, \"efficient\": { \"2009\": numpy.array([", "yield correct output payback values for idx, cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2)", "0.3, 0.4, 0.5], \"nested key 2\": 2}, \"key 2\": 5.8}}} def test_numpy_convert(self): \"\"\"Test", "{ \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"cost\": {", "\"2010\": 0.22}}] cls.ok_out_dist1 = [{ \"savings and portfolio metrics\": { \"Technical potential\": {", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07,", "Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify test measure", "energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} 
cls.compete_meas5", "{ \"2009\": numpy.array([ 39.06682, 40.94604, 30.43499]), \"2010\": numpy.array([ 39.06682, 40.94604, 30.43499])}, \"efficient\": {", "{ \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16,", "numpy.array([-150, -200, -100]), \"2010\": numpy.array([-50, -100, -10])}, \"commercial\": { \"2009\": None, \"2010\": None}}},", "None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-50, -100,", "{ \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}},", "sample measure attributes. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all", "\"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]),", "and captured)\": {} }}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\":", "\"2010\": numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc (w/ energy cost benefits)\": {", "= self.ok_out self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'calc_savings_metrics'", "it a sample 'uncompeted' # market ('ok_master_mseg_dist2'), the focus of this test suite", "self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'metrics_update' function. 
Verify that", "\"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\":", "0.865895571, \"2010\": 0.865895571}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0,", "\"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\":", "captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure4 = { \"name\": \"sample measure 4\", \"active\":", "update status, savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist2'", "10}, \"measure\": { \"2009\": numpy.array([2.23, 9.77, 0.02]), \"2010\": numpy.array([2.23, 9.77, 0.02])}}, \"competed\": {", "home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist = { \"measures\": cls.measures_all_dist[0:2], \"keys\":", "2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}}, \"irr (w/ energy", "= run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3 # Create Engine instance using test", "for competed primary market microsegment key chain being tested. 
overlap_key_scnd (string): Second sample", "15}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) # Check updated", "\"Commercial\": { \"Heating\": {\"2009\": .40, \"2010\": .40}, \"Cooling\": {\"2009\": .45, \"2010\": .45}}}} cls.ok_out", "5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.036380, 0.019260, -0.01934271, -0.01897398, -0.04613129]),", "\"rate 4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}}}, \"energy", "carbon costs)\": { \"2009\": numpy.array([ 4.713113, 4.884221, 5.309580, 2.908860, 5.394281]), \"2010\": numpy.array([ 4.601286,", "numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])},", "1.73179114}, \"efficient\": {\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571},", "-1.021532e-07, -9.855809e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07,", "{ cls.overlap_key: { \"rate distribution\": { \"2009\": [ 0.1, 0.1, 0.1, 0.1, 0.1,", "(competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key1: { \"2009\": 0,", "savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist4' with a", "portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[", "{ \"cce\": { \"2009\": numpy.array([ -0.01306317, -0.01389378, -0.01422262, -0.01238981, -0.01613170]), \"2010\": numpy.array([ -0.01145724,", "1.356014)]), \"2010\": 
numpy.array([ numpy.pmt(0.07, 2, 1.346974), numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07,", "10, \"2010\": 10}, \"measure\": {\"2009\": 8.02, \"2010\": 8.02}}}, \"energy\": { \"total\": { \"baseline\":", "\"rate 2\": numpy.pmt(1.0, 2, 0), \"rate 3\": numpy.pmt(0.45, 2, 0.1896552), \"rate 4\": numpy.pmt(0.25,", "\"competed\": { \"baseline\": {\"2009\": 11.11183, \"2010\": 11.11183}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "0.2, 0.3, 0.4, 0.5], \"nested key 2\": 2}, \"key 2\": 5.8}}} def test_numpy_convert(self):", "22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]),", "numpy.array([36, 45, 61, 5, 54])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\":", "\"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6, 6.5, 8])}}, \"competed\":", "105, \"rate 6\": 110, \"rate 7\": 115}}}, \"energy cost\": { \"residential\": { \"2009\":", "2, 1.165279), \"rate 4\": numpy.pmt(0.25, 2, 1.44), \"rate 5\": numpy.pmt(0.15, 2, 1.625709), \"rate", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302,", "\"2009\": 20, \"2010\": numpy.array([10, 12, 14])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10},", "9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}},", "all tests below.\"\"\" def dict_check(self, dict1, dict2): \"\"\"Check the equality of two dicts.", "measure 5 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None,", "engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 
0].update_results, self.ok_out_dist4[0])", "\"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"carbon\":", "\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array( [15, 16, 17]), \"2010\": numpy.array(", "-2.355809e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.232209e-08, -9.117156e-08, -8.600937e-08, -8.564064e-08,", "19.53341, \"2010\": 19.53341}}, \"competed\": { \"baseline\": {\"2009\": 13.02227, \"2010\": 13.02227}, \"efficient\": {\"2009\": 6.511136,", "Sample unpartitioned measure results data. ok_partitions (dict): Sample results partitioning fraction. ok_out (dict):", "chain being tested. secnd_adj_key (string): Key used to link primary and secondary market", "market ('ok_master_mseg_dist4'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][", "10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 11.11183,", "\"efficient\": {\"2009\": 10, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\":", "it a sample 'uncompeted' # market ('ok_master_mseg_dist1'), the focus of this test suite", "0.22, 0.22])}}] cls.ok_out_dist4 = [{ \"savings and portfolio metrics\": { \"Technical potential\": {", "{ \"cce\": {\"2009\": -0.01602415, \"2010\": -0.01111353}, \"cce (w/ carbon cost benefits)\": { \"2009\":", "numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([", "{ \"baseline\": { \"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": { \"2009\": 1.73179114, \"2010\": 1.73179114}},", "(dict): Dict of sample measure attributes. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for", "\"sample measure 4\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None,", "\"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\":", "in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test outcomes given valid sample", "1, 0.4672897), numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 5,", "\"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}, \"competed\": { \"baseline\": {\"2009\": 11.5, \"2010\":", "{ \"2009\": 10, \"2010\": numpy.array([16, 15, 13])}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([8,", "{}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": {", "0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist = { \"name\": \"sample compete measure c2", "cooling measure 1 including lists of energy/carbon and associated cost input values instead", "\"measure\": {\"2009\": 1.11, \"2010\": 1.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\":", "\"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP',", "incorporating all 'measures_all' objects. 
measures_all_dist (list): List including competing/interacting sample Measure objects with", "1, 1, 1, 1, 5, 7, 8], [-10, 14, 2, 3, 4], [-10,", "family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\": None, \"end_use\": {\"primary\":", "{ \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"energy cost\": { \"residential\": {", "run # Import needed packages import unittest import numpy import copy import itertools", "measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test output given residential measure", "11.11183, \"2010\": 11.11183}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "each sample measure # following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run_dist.measures):", "m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2),", "\"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3_dist = { \"name\":", "-200, -100]), \"2010\": numpy.array([-50, -100, -10])}, \"commercial\": { \"2009\": None, \"2010\": None}}}, {", "a residential sample measure. 
ok_out_dist2 (dict): Measure attribute update status, savings, and portfolio/consumer-level", "to be of comparable structure # to the normal output from zip_longest() fill_val", "\"2010\": 1.73179114}, \"efficient\": { \"2009\": 0.865895571, \"2010\": 0.865895571}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571,", "numpy.array([ numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346),", "measures w/ some array inputs.\"\"\" # Run measure competition routine on sample measures", "this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2 # Create", "\"baseline\": {\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\":", "\"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\":", "7\": 115}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", "7]), \"2010\": numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "energy (competed and captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (total", "measure #1. 
\"\"\" def __init__(self): self.sample_measure = { \"name\": \"sample measure 1\", \"active\":", "{\"2009\": 100, \"2010\": 100} cls.ok_partitions = { \"AIA CZ1\": { \"Residential\": { \"Heating\":", "numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}}, \"carbon\": { \"savings (total)\": { \"2009\": numpy.array([149.4, 142.3,", "{ \"all\": {\"2009\": 5, \"2010\": 10}, \"measure\": {\"2009\": 5, \"2010\": 10}}}, \"energy\": {", "2.043061), numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07, 2, 1.356014)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "0.2100840, 0.2222222])}}] cls.ok_out_dist2 = [{ \"savings and portfolio metrics\": { \"Technical potential\": {", "'measures_demand_dist' Measure objects. measures_overlap2_dist (dict): List of demand-side Measure objects and associated contributing", "self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments for each sample", "-0.1837021), numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654), None, None, None, 0.62, 1.59, 2,", "24}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}},", "= self.ok_master_mseg_dist1 # Create Engine instance using test measure, run function on it", "{ \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}, \"competed\": {", "\"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_com(self): \"\"\"Test outcomes given", "40}, \"efficient\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20},", "k and k2 are the keys that correspond to # the dicts or", "\"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}, \"carbon cost\": { \"residential\": {\"2009\":", "\"room AC\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": {", "(list): Subset of 'measures_all_dist' with 
secondary microsegments to adjust. a_run_dist (object): Analysis engine", "16}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 8}}},", "measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist1'), the focus of", "objects and associated contributing microsegment keys that overlap with 'measures_supply' Measure objects. a_run", "numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}}, \"competed\": { \"baseline\": {", "{ \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50,", "2.265408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\":", "should be generated for each Measure object in 'measures_all_dist' following competition and supply-demand", "5.114887, 9.990366])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\":", "{ \"2009\": numpy.array([ 0.003046667, -0.01407333, -0.05267604, -0.05230731, -0.07946463]), \"2010\": numpy.array([ -0.047715000, -0.05520500, -0.09523954,", "with 'measures_demand' Measure objects. measure_master_msegs_out (dict): Master market microsegments that should be generated", "and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_dist1 = [{ \"savings and", "'ok_master_mseg_dist3' with a residential sample measure. ok_out_dist4 (dict): Measure attribute update status, savings,", "\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\":", "{ \"rate 1\": 105, \"rate 2\": 110, \"rate 3\": 115, \"rate 4\": 120,", "-8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07, -9.855809e-08])}}, { \"anpv\": { \"stock cost\":", "tested. 
adjust_key2 (string): Second sample string for competed demand-side and supply-side market microsegment", "1, 2]), \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": {", "measure r1 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\":", "cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.9040091), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\":", "\"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}}, \"lifetime\":", "2, 0.75), \"rate 3\": numpy.pmt(0.45, 2, 1.165279), \"rate 4\": numpy.pmt(0.25, 2, 1.44), \"rate", "demand-side measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across the", "scaling\": 1}}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): {", "1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20},", "# Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_point'),", "{ \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}, \"competed\": {", "microsegments. Attributes: handyvars (object): Useful variables across the class. test_adopt_scheme (string): Sample consumer", "115, \"rate 6\": 120, \"rate 7\": 125}, { \"rate 1\": 105, \"rate 2\":", "each Measure object in 'measures_all' following competition and supply-demand overlap adjustments. 
measure_master_msegs_out_dist (dict):", "\"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 100, \"2010\":", "captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = {", "the recursion has not yet reached the terminal/leaf node if isinstance(i, dict): #", "22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\": { \"total\":", "partitioned measure results data. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across", "'existing']\": { \"total\": { yr: 10 for yr in cls.handyvars.aeo_years}, \"total affected\": {", "\"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\":", "\"market_entry_year\": None, \"market_exit_year\": None, \"markets\": { \"Technical potential\": { \"key 1\": { \"nested", "energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample", "\"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "{ \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7, 7.7, 11.2,", "{ \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}},", "cooling supply-demand overlaps. Attributes: handyvars (object): Useful variables across the class. test_adopt_scheme (string):", "needed to finalize array test measure consumer # metrics consumer_metrics_dist = [{ \"stock", "[20.1, 18.7, 21.7, 21.2, 22.5])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "\"market_scaling_fractions_source\": None, \"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single", "the 'metrics_update' function. 
Verify that cashflow inputs generate expected prioritization metric outputs. Attributes:", "\"2010\": 20}, \"efficient\": { \"2009\": numpy.array( [15, 16, 17]), \"2010\": numpy.array( [15, 16,", "string for competed primary market microsegment key chain being tested. overlap_key_scnd (string): Second", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\":", "using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Test that valid input cashflows", "4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}},", "running all test fixtures in the file.\"\"\" unittest.main() if __name__ == \"__main__\": main()", "7\": 125}, { \"rate 1\": 105, \"rate 2\": 110, \"rate 3\": 115, \"rate", "\"efficient\": { \"2009\": 25.5, \"2010\": numpy.array([18, 19.5, 24])}}, \"competed\": { \"baseline\": { \"2009\":", "def test_metric_updates(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" # Create an Engine", "lifetime. ok_life_ratio (int): Sample measure->baseline lifetime ratio. ok_base_scost (int): Sample baseline stock cost.", "\"commercial\": { \"2009\": { \"rate 1\": -40, \"rate 2\": -50, \"rate 3\": -55,", "0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "{\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\": 0, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\":", "\"2010\": 2.227001}, \"efficient\": {\"2009\": 1.113501, \"2010\": 1.113501}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\":", "self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on sample supply-side measures self.a_run.compete_res_primary(", "ok_num_units (int): Sample number of competed units. 
ok_base_life (int): Sample baseline technology lifetime.", "\"2009\": 0.865895571, \"2010\": 0.865895571}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\":", "climate, building type, structure type). compete_meas1 (dict): Sample commercial supply-side lighting measure 1.", "{ \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 20,", "'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist = { \"measures\":", "\"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5},", "\"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07, 2,", "\"2010\": 15}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 90, \"2010\": 90}, \"efficient\": {\"2009\":", "= { \"name\": \"sample compete measure r1 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family", "CommonMethods): \"\"\"Test the operation of the 'calc_savings_metrics' function. Verify that measure master microsegment", "[run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life = 3 cls.ok_product_lifetime = 6.2 cls.ok_life_ratio = 2 cls.ok_base_scost =", "carbon costs)\": {\"2009\": numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]), \"2010\": numpy.array([0.33, 0.33, 0.22, 0.22,", "{\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\":", "dicts # is empty, is missing section(s), or has different key names self.assertEqual(k,", "[numpy.ndarray, int, float])])) # Offer external code execution (include all lines below this", "0.2659574, 0.2857143]), \"2010\": numpy.array([ 0.3344482, 0.3194888, 0.3533569, 0.3472222, 0.3636364])}, \"payback (w/ energy and", "compete_meas2 (dict): Sample residential demand-side cooling measure 2. 
compete_meas3 (dict): Sample residential supply-side", "\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": { \"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\":", "demand-side and supply-side market microsegment key chain being tested. adjust_key2 (string): Second sample", "{ \"residential\": { \"2009\": -150, \"2010\": -150}, \"commercial\": { \"2009\": None, \"2010\": None}},", "numpy.array([ 8.886499, 5.114887, 9.990366])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0,", "measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[", "6.511136, 6.824341, 5.072499])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 39.06682, 40.94604,", "metrics outputs. Attributes: handyvars (object): Useful variables across the class. sample_measure_res (object): Sample", "carbon costs)\": {\"2009\": numpy.array([ 0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.17, 0.1233333,", "financial metrics that should be generated given 'ok_master_mseg_dist4' with a residential sample measure.", "savings (total)\": {\"2009\": -5, \"2010\": -10}, \"cost savings (annual)\": {\"2009\": -5, \"2010\": -10}},", "r2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\":", "\"measure\": { \"2009\": numpy.array([1.73, 0.02, 9.60]), \"2010\": numpy.array([1.73, 0.02, 9.60])}}, \"competed\": { \"all\":", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07,", "\"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure", "of 'measures_all_dist'. 
measures_overlap1_dist (dict): List of supply-side Measure objects and associated contributing microsegment", "\"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}, \"efficient\": { \"2009\":", "{ \"2009\": numpy.array([ 0, 0.001808835, 1.920664]), \"2010\": numpy.array([ 0, 0.001808835, 1.920664])}}}, \"energy\": {", "7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\":", "{ \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.113501, \"2010\": 1.113501}},", "class. test_adopt_scheme (string): Sample consumer adoption scheme. test_htcl_adj (dict): Sample dict with supply-demand", "\"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "cls.sample_measure = CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict = measure_instance.__dict__ def test_attributes(self): \"\"\"Compare", "update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1])", "10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 5, \"2010\": 5}}},", "sample measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments", "base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars,", "(competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas1_dist = { \"name\": \"sample compete", "**x) for x in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), 
cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2]", "1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\":", "\"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": {\"2009\": None, \"2010\": None},", "5), \"2010\": numpy.repeat(None, 5) }}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07,", "comp_scheme in [\"uncompeted\", \"competed\"]: tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x, y) for x,", "30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15,", "-200, \"2010\": -200}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\":", "for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test outcomes", "1\"][\"nested key 2\"], tested_data[\"key 2\"]], [numpy.ndarray, int, float])])) # Offer external code execution", "captured)\": {} }}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": {", "all competing measures with point value inputs. 
measures_secondary (list): Subset of 'measures_all' with", "\"2010\": numpy.array([16.04, 17.30, 10.29])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {", "2])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 15,", "[15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": { \"2009\":", "[\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\": None, \"end_use\":", "\"secondary\": \"supply\"}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": [\"general service", "in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]] # Instantiate engine object based", "5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 3.370236, 6.877566, 4.335205, 4.218185, 3.081800]), \"2010\":", "\"baseline\": {\"2009\": 5, \"2010\": 8}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\":", "\"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "[\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\":", "0.20, 0.20, 0.20]), \"2010\": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}] cls.ok_out_dist4 = [{ \"savings", "\"mseg_out_break\": {}}}} class CommonMethods(object): \"\"\"Define common methods for use in all tests below.\"\"\"", "(dict): Sample partitioned measure results data. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for", "correct output payback values for idx, cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class", "'measures_all_dist' with secondary microsegments to adjust. a_run_dist (object): Analysis engine object incorporating all", "competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def", "\"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11, 11, 10.5])}}, \"competed\": { \"baseline\": { \"2009\":", "# Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify test", "15, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market", "\"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "-9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08, -8.216772e-08, -7.592937e-08])}}, { \"anpv\":", "\"baseline\": { \"2009\": 25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\":", "\"Technical potential\": { \"key 1\": { \"nested key 1\": [1, 2, 3, 4,", "{ \"2009\": 20, \"2010\": numpy.array([8, 9, 9.1])}}, \"competed\": { \"baseline\": { \"2009\": 5,", "\"rate 7\": -200}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) #", "8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}}, 
\"carbon\": { \"savings (total)\": {", "engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0])", "{ \"baseline\": { \"2009\": 34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5,", "\"2010\": { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95, \"rate 4\":", "Useful variables across the class. sample_measure_res (object): Sample residential measure data. sample_measure_com (object):", "20, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5,", "\"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure c2\", \"climate_zone\":", "numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014),", "0, \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "0.9040091)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "numpy.pmt(0.45, 2, 0.01724138), \"rate 4\": numpy.pmt(0.25, 2, 0.1), \"rate 5\": numpy.pmt(0.15, 2, 0.1521739),", "20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341,", "{\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 16.04, \"2010\": 16.04}}, \"competed\": { \"all\": {\"2009\":", "\"2010\": -100}, \"commercial\": { \"2009\": None, \"2010\": None}}}] # Adjust/finalize point value test", "\"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 21.11183, 21.34227,", "None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": numpy.array([95, 100, 90]),", "\"2010\": numpy.array([0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": 
{\"2009\": 20, \"2010\": 20},", "cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "cls.ok_base_scost = 1 cls.ok_meas_sdelt = -1 cls.ok_esave = 7.5 cls.ok_ecostsave = 0.5 cls.ok_csave", "\"2010\": 20}, \"measure\": {\"2009\": 17.77, \"2010\": 17.77}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\":", "\"efficient\": { \"2009\": numpy.array([ 2.227001, 10.25874, 0.02119408]), \"2010\": numpy.array([ 2.227001, 10.25874, 0.02119408])}}, \"competed\":", "{ \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}}, \"competed\": {", "7.816181, 0.01637724])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([", "39.06682, \"2010\": 39.06682}, \"efficient\": {\"2009\": 26.04455, \"2010\": 26.04455}}, \"competed\": { \"baseline\": {\"2009\": 19.53341,", "\"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 15, \"2010\":", "\"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\": 50, \"2010\": 100}}, \"competed\": { \"baseline\":", "0.8859289), numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07, 2, 2.079221)]),", "4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068,", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.02227, 13.64868, 10.14500]), \"2010\": numpy.array([ 13.02227, 13.64868,", "\"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"carbon\":", "point values. compete_meas2 (dict): Sample residential demand-side cooling measure 2. 
compete_meas3 (dict): Sample", "10, \"2010\": numpy.array([16, 15, 13])}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([8, 9, 9.1])}},", "{ \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}},", "\"\"\"Test 'compete_res_primary,' and 'htcl_adj'. Verify that 'compete_res_primary' correctly calculates primary market shares and", "\"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0, \"2010\": 5}}}, \"energy\":", "{\"2009\": 90, \"2010\": 90}, \"efficient\": {\"2009\": 60, \"2010\": 60}}, \"competed\": { \"baseline\": {\"2009\":", "{ \"rate 1\": numpy.pmt(10.0, 2, 0.04958678), \"rate 2\": numpy.pmt(1.0, 2, 0.375), \"rate 3\":", "Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) # Verify test measure", "{}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}, \"Max adoption potential\":", "commercial measure with point value inputs.\"\"\" # Initialize test measure and assign it", "None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\":", "Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify test measure", "8.0])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12,", "of dict1 and dict2, respectively for (k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()),", "5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "= cls.measures_all_dist[2:5] cls.measures_overlap1_dist = { \"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 
'single family home',", "0.5567503, \"2010\": 0.5567503}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\":", "'assembly', 'electricity (grid)', 'lighting', 'reflector (LED)', 'existing')) cls.overlap_key_scnd = str( ('secondary', 'AIA_CZ1', 'assembly',", "# Check updated competed master microsegments for each sample measure # following competition/supply-demand", "3, 4, 5], \"nested key 2\": 5}, \"key 2\": 10.8}, \"Max adoption potential\":", "2, 0.5145794), numpy.pmt(0.07, 5, 2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07, 1, -0.185),", "residential sample measure. ok_out_dist2 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial", "0.21, 0.2750000]), \"2010\": numpy.array([ 0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])}, \"payback (w/ energy and", "= run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using test", "21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5,", "1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_out_point_res = [{ \"savings and portfolio", "\"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-150,", "19.53341, 20.47302, 15.21750])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.02227, 13.64868, 10.14500]), \"2010\":", "\"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\":", "energy costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr (w/ energy and carbon costs)\":", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\": numpy.array([16, 15, 13])},", "6\": 120, \"rate 7\": 125}, { \"rate 1\": 105, \"rate 2\": 110, \"rate", "1.73179114}, \"efficient\": { \"2009\": 1.29884336, 
\"2010\": 1.29884336}}, \"competed\": { \"baseline\": { \"2009\": 0.865895571,", "potential\": { \"key 1\": { \"nested key 1\": [1, 2, 3, 4, 5],", "\"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\":", "for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test outcomes", "\"efficient\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}}, \"competed\":", "7\": -120}, \"2010\": { \"rate 1\": -90, \"rate 2\": -95, \"rate 3\": -100,", "{\"2009\": 10, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40},", "run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows = [[-10, 1,", "{\"2009\": numpy.array([ 3.370236, 6.877566, 4.335205, 4.218185, 3.081800]), \"2010\": numpy.array([ 5.345834, 7.580577, 3.931585, 6.612039,", "\"stock cost\": { \"residential\": { \"2009\": numpy.array([95, 100, 90]), \"2010\": numpy.array([95, 100, 90])},", "-0.02023954, -0.02715319, -0.05525120])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.003046667, -0.01407333,", "# Instantiate measure measure_instance = run.Measure(self.handyvars, **self.sample_measure) # Test for correct data types", "0.02, 9.60]), \"2010\": numpy.array([1.73, 0.02, 9.60])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5},", "10, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array( [15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\":", "dict1 = self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2 = self.ok_out self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods):", "consumer metrics for ind, m in 
enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars,", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\":", "1.670251, 7.816181, 0.01637724]), \"2010\": numpy.array([ 1.670251, 7.816181, 0.01637724])}}, \"competed\": { \"baseline\": { \"2009\":", "{ \"contributing mseg keys and values\": { cls.overlap_key: { \"stock\": { \"total\": {", "demand-side Measure objects and associated contributing microsegment keys that overlap with 'measures_supply' Measure", "\"measure\": {\"2009\": 0, \"2010\": 16}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "\"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 1.5, 2.6])}}}, \"energy\": { \"total\": { \"baseline\":", "with a residential sample measure. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use", "\"efficient\": { \"2009\": 34.5, \"2010\": numpy.array([33, 33, 31.5])}}, \"competed\": { \"baseline\": { \"2009\":", "5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array( [0, 1, 2]), \"2010\": numpy.array( [0,", "7\": -0.125}}}, \"energy cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "\"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([10, 12, 14])}},", "{\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}}, \"carbon\": { \"total\": {", "{ \"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": { \"2009\": 0.865895571, \"2010\": 0.865895571},", "\"measure\": {\"2009\": 11.5, \"2010\": 11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\":", "5}, \"measure\": {\"2009\": 0.87, \"2010\": 0.87}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 1.73179114,", "measure 2. compete_meas3 (dict): Sample commercial supply-side lighting measure 3. 
compete_meas_dist (dict): Alternative", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\":", "measure competition routine on sample supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) # Remove", "Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) # Verify test measure portfolio-level financial", "5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy", "\"competed\": { \"baseline\": {\"2009\": 8.886499, \"2010\": 8.886499}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "{\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": [\"general service (LED)\"]}, \"markets\": {", "\"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.002333333, 0.002333333, -0.04935749, -0.04935749, -0.0802776]),", "10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": {", "{ \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([1.73, 0.02,", "potential\": { \"uncompeted\": True, \"competed\": True}, \"Max adoption potential\": { \"uncompeted\": False, \"competed\":", "\"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "{ \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": .10, \"2010\": .10}, \"Cooling\": {\"2009\":", "ok_out (dict): Sample partitioned measure results data. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables", "the terminal/leaf node, formatted as a numpy array # (for input uncertainty test", "{ \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": {", "missing content; this # value is given as a tuple to be of", "using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Record the output for the", "this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create", "cls.ok_csave = 50 cls.ok_ccostsave = 1 cls.ok_out_array = [ numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07,", "= CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows = [[-10, 1, 1, 1, 1,", "commercial measure #1. \"\"\" def __init__(self): self.sample_measure = { \"name\": \"sample measure 1\",", "19.98073]), \"2010\": numpy.array([ 17.77300, 10.22977, 19.98073])}, \"efficient\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]),", "\"measure\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}}, \"competed\": { \"all\": {\"2009\": 10,", "\"2010\": .15}}, \"Commercial\": { \"Heating\": {\"2009\": .20, \"2010\": .20}, \"Cooling\": {\"2009\": .25, \"2010\":", "across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure =", "In this structure, k and k2 are the keys that correspond to #", "0, \"2010\": numpy.array([24, 20, 12])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}},", "test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify test measure consumer-level", "\"secondary heating\", \"cooling\"]}, \"technology\": [\"reflector 
(LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": \"demand\"}, \"market_entry_year\":", "{ \"2009\": 15, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array([20,", "costs)\": {\"2009\": numpy.array([ 0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]), \"2010\": numpy.array([ 1.9411765, 3.054054, 3.931585,", "110, \"rate 5\": 115, \"rate 6\": 120, \"rate 7\": 125}, { \"rate 1\":", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([16.04, 17.30, 10.29]), \"2010\":", "20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "[ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}}, \"secondary mseg adjustments\": { \"market", "and supply-demand overlap adjustments. measure_master_msegs_out_dist (dict): Master market microsegments that should be generated", "across the class. 
measure_list (list): List for Engine including one sample residential measure.", "\"energy cost\": { \"residential\": { \"2009\": -150, \"2010\": -150}, \"commercial\": { \"2009\": None,", "27.77300}}, \"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}},", "status, savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist3' with", "1.165279), \"rate 4\": numpy.pmt(0.25, 2, 1.44), \"rate 5\": numpy.pmt(0.15, 2, 1.625709), \"rate 6\":", "numpy.array([ -0.04898876, -0.05783823, -0.05267604, -0.05230731, -0.04751385]), \"2010\": numpy.array([ -0.09966428, -0.10353592, -0.09523954, -0.10215319, -0.09855809])},", "int, float])])) # Offer external code execution (include all lines below this point", "{\"2009\": 63.33550, \"2010\": 63.33550}, \"efficient\": {\"2009\": 42.22366, \"2010\": 42.22366}}, \"competed\": { \"baseline\": {\"2009\":", "11.11183, 11.34227, 10.05334])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}},", "\"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 23, \"2010\": 22}}, \"competed\": { \"all\":", "formatted as a numpy array # (for input uncertainty test cases) elif isinstance(i,", "\"baseline\": {\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": {\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\":", "None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -400, \"2010\": -400}, \"commercial\":", "\"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -100, \"2010\": -100}, \"commercial\": {", "\"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None,", "it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test 
measure results", "affected\": { yr: 5 for yr in cls.handyvars.aeo_years}, \"affected savings\": { yr: 5", "\"2010\": numpy.array([18, 15, 9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}}, \"lifetime\":", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07,", "copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist =", "\"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 2, 4])}}},", "{ \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6,", "\"rate 2\": numpy.pmt(1.0, 2, 0.375), \"rate 3\": numpy.pmt(0.45, 2, 0.5826397), \"rate 4\": numpy.pmt(0.25,", "cls.ok_master_mseg_dist1 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\":", "2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 2, 0.3845794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.4459346),", "ok_rate (float): Sample discount rate. 
ok_master_mseg_point (dict): Sample measure master microsegment including all", "{\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([17.77, 10.23, 19.98]), \"2010\": numpy.array([17.77, 10.23,", "{\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ -0.01565543, -0.02450490, -0.01934271, -0.01897398,", "{ \"2009\": numpy.array([ -8.232209e-08, -9.117156e-08, -8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08,", "23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11, 11, 10.5])}},", "8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]),", "adjustments. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\"", "self.assertTrue(type(i) == type(i2)) for x in range(0, len(i)): self.assertAlmostEqual(i[x], i2[x], places=2) # At", "5\": -110, \"rate 6\": -115, \"rate 7\": -120}, \"2010\": { \"rate 1\": -90,", "2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "\"efficient\": {\"2009\": 20.82975, \"2010\": 20.82975}}, \"competed\": { \"baseline\": {\"2009\": 13.88650, \"2010\": 13.88650}, \"efficient\":", "value test measure consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_dist[ind]", "uncertainty test cases) elif isinstance(i, numpy.ndarray): self.assertTrue(type(i) == type(i2)) for x in range(0,", "2. compete_meas3 (dict): Sample commercial supply-side lighting measure 3. 
compete_meas_dist (dict): Alternative version", "\"residential\": { \"2009\": -200, \"2010\": -200}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon", "6\": 150, \"rate 7\": 160}, \"2010\": { \"rate 1\": 100, \"rate 2\": 110,", "5.057443, 7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "60}}, \"competed\": { \"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}},", "objects with array inputs. measures_demand_dist (list): Demand-side subset of 'measures_all_dist'. measures_supply_dist (list): Supply-side", "(w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.0396936, -0.04452961, -0.05150073, -0.006204243, -0.09331291]), \"2010\":", "Analysis engine object incorporating all 'measures_primary_dist' objects. measures_overlap (dict): List of supply-side Measure", "{ \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}}, \"lifetime\": { \"baseline\":", "\"2010\": numpy.array([22.22, 22.68, 20.11])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {", "0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "(dict): Alternative version of sample residential demand-side cooling measure 1 including lists of", "{ \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {", "numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([0.50, 0.50, 0.25,", "None}, \"technology\": {\"primary\": [\"F32T8\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {},", "= [{ \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\": {", "demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) # Run", "{ \"residential\": { \"2009\": numpy.array([ 
numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2,", "\"2010\": numpy.array([16, 15, 13])}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([8, 9, 9.1])}}, \"competed\":", "}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg", "microsegments for a series of competing residential measures; and that 'htcl_adj' properly accounts", "and values\": { cls.adjust_key2: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "\"2010\": 21.11183}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "15}, \"measure\": {\"2009\": 11.11, \"2010\": 11.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366,", "a sample 'uncompeted' # market ('ok_master_mseg_dist1'), the focus of this test suite test_meas", "{ \"2009\": 10, \"2010\": numpy.array([0, 1.5, 2.6])}}}, \"energy\": { \"total\": { \"baseline\": {", "\"secondary\": None}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": None}, \"markets\":", "inputs.\"\"\" # Run measure competition routine on sample measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme)", "measure # consumer metrics consumer_metrics_final = [{ \"stock cost\": { \"residential\": { \"2009\":", "{\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 23, \"2010\": 22}}, \"competed\": { \"all\": {\"2009\":", "\"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "300}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7,", "2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}}, \"irr (w/ energy costs)\": { \"2009\":", "master microsegment including measure lifetime array. 
ok_master_mseg_dist4 (dict): Sample measure master microsegment including", "\"rate 6\": -115, \"rate 7\": -120}}}}] # Adjust/finalize point value test measure consumer", "measure consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind] cls.measures_master_msegs_out =", "\"secondary\": [\"heating\", \"secondary heating\", \"cooling\"]}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\":", "test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify test measure portfolio-level financial metrics", "\"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": numpy.array([ 0, 1, 2])}}}, \"energy\": {", "scheme. ok_rate (float): Sample discount rate. ok_master_mseg_point (dict): Sample measure master microsegment including", "[\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\":", "9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.113501, 4.885113,", "self.sample_measure = { \"name\": \"sample measure 1\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None,", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {", "\"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 31.66775, 32.01341,", "\"rate 3\": numpy.pmt(0.45, 2, 1.165279), \"rate 4\": numpy.pmt(0.25, 2, 1.44), \"rate 5\": numpy.pmt(0.15,", "self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across the supply and demand", "keys are equal; this should fail if one of the dicts # is", "numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4 = { 
\"stock\": { \"total\": { \"all\":", "0.2347418, 0.2242152, 0.2659574, 0.2857143]), \"2010\": numpy.array([ 0.3344482, 0.3194888, 0.3533569, 0.3472222, 0.3636364])}, \"payback (w/", "'measures_supply_dist' Measure objects. a_run_dist (object): Engine object incorporating all 'measures_all_dist' objects. measure_master_msegs_out (dict):", "\"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"cost\": { \"stock\": { \"total\":", "124])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([6, 7,", "captured)\": {}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}", "numpy.array([ -0.047715000, -0.05520500, -0.09523954, -0.10215319, -0.13025120])}, \"ccc\": { \"2009\": numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08,", "the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] =", "\"2010\": 10}, \"measure\": { \"2009\": numpy.array([2.23, 9.77, 0.02]), \"2010\": numpy.array([2.23, 9.77, 0.02])}}, \"competed\":", "\"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}, \"efficient\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\":", "5, \"2010\": 8}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {", "\"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 15, \"2010\": 5}}}}, \"lifetime\":", "-135, \"rate 2\": -140, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\": -155,", "\"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 15, \"2010\":", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": {\"2009\": 1.73179114, \"2010\":", "compete_meas3 (dict): Sample residential 
supply-side cooling measure 1. compete_meas3_dist (dict): Alternative version of", "None: self.assertAlmostEqual(function_output[ind], x, places=2) else: self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase): \"\"\"Test the operation of", "\"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test outcomes given valid sample measures w/ some array inputs.\"\"\"", "self.measures_demand, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across the supply and demand", "0.2222222])}}] cls.ok_out_dist2 = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\":", "residential demand-side cooling measure 1 including lists of energy/carbon and associated cost input", "\"stock\": { \"total\": { \"all\": { \"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\":", "energy cost benefits)\": { \"2009\": numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([", "1, 2]), \"2010\": numpy.array([0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20,", "\"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341, \"2010\": 19.53341}}, \"competed\": { \"baseline\": {\"2009\": 13.02227,", "\"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "{\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\":", "1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key1: { \"b1\": {\"2009\": -0.95,", "{ \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4 = { \"name\": \"sample compete", "output payback values for idx, cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class 
ResCompeteTest(unittest.TestCase,", "-100, \"rate 4\": -105, \"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120}}}}]", "\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\":", "respectively for (k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): # Confirm that", "{\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array( [25.1, 24.7, 23.7, 31.2, 18.5]),", "57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}, \"competed\": {", "-0.021500000, -0.08611353, -0.08611353, -0.1247637])}, \"ccc\": { \"2009\": numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]),", "numpy.array([ 11.11183, 11.34227, 10.05334])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0,", "# market ('ok_master_mseg_point'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_com)", "[[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1',", "energy savings. ok_ecostsave (int): Sample measure energy cost savings. ok_csave (int): Sample measure", "0.72), \"rate 5\": numpy.pmt(0.15, 2, 0.8128544), \"rate 6\": numpy.pmt(0.065, 2, 0.9103132), \"rate 7\":", "\"2010\": 1.113501}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "cls.compete_meas1 = { \"name\": \"sample compete measure r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family", "(dict): Sample results partitioning fraction. ok_out (dict): Sample partitioned measure results data. 
\"\"\"", "array test measure consumer # metrics consumer_metrics_dist = [{ \"stock cost\": { \"residential\":", "sorted(dict2.items()), fillvalue=fill_val): # Confirm that at the current location in the dict structure,", "0.1937984, 0.1879699, 0.1748252, 0.2840909, 0.1724138]), \"2010\": numpy.array([ 0.2008032, 0.1901141, 0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2", "0.5145794), numpy.pmt(0.07, 5, 2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07,", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17, \"2010\": 12}}, \"competed\":", "{ \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {", "\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, str(('primary', 'AIA_CZ2', 'single", "{\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 2, 4])}}}, \"energy\":", "measures_demand_dist (list): Demand-side subset of 'measures_all_dist'. measures_supply_dist (list): Supply-side subset of 'measures_all_dist'. measures_overlap1_dist", "numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": {", "5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2,", "potential' cls.ok_rate = 0.07 cls.ok_master_mseg_point = { \"stock\": { \"total\": { \"all\": {\"2009\":", "objects. a_run_dist (object): Engine object incorporating all 'measures_all_dist' objects. 
measure_master_msegs_out (dict): Master market", "-1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.10e-08, -3.10e-08,", "5, \"2010\": numpy.array([ 0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "\"Heating\": {\"2009\": 40, \"2010\": 40}, \"Cooling\": {\"2009\": 45, \"2010\": 45}}}} def test_ok(self): \"\"\"Test", "\"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([6, 7, 1,", "'lighting gain', 'existing')) cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1 = { \"name\": \"sample", "{ \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 15,", "consumer_metrics_dist = [{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "**cls.sample_measure) cls.attribute_dict = measure_instance.__dict__ def test_attributes(self): \"\"\"Compare object attributes to keys from input", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])},", "{ \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": {\"baseline\":", "\"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07, 1, -0.27),", "5.345834, 7.580577, 3.931585, 6.612039, 4.915578])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([", "cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]] # Instantiate engine object based on above measures", "\"2010\": numpy.array([11.11, 11.34, 10.05])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366,", "\"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "numpy.array([ 22.22366, 22.68455, 20.10668]), \"2010\": numpy.array([ 22.22366, 22.68455, 20.10668])}, \"efficient\": { 
\"2009\": numpy.array([", "100}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\":", "cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments for each", "all # test files) def main(): \"\"\"Trigger default behavior of running all test", "\"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.9040091), \"2010\": numpy.pmt(0.07,", "\"baseline\": {\"2009\": 41.65950, \"2010\": 41.65950}, \"efficient\": {\"2009\": 27.77300, \"2010\": 27.77300}}, \"competed\": { \"baseline\":", "sample string for competed primary market microsegment key chain being tested. overlap_key_scnd (string):", "\"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\",", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 45, \"2010\":", "27.77300, \"2010\": 27.77300}}, \"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250,", "\"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 5, \"2010\":", "0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}},", "-0.01602415, -0.01602415, -0.04694426]), \"2010\": numpy.array([ 0.05350000, 0.05350000, -0.01111353, -0.01111353, -0.04976366])}, \"cce (w/ carbon", "{ \"2009\": 0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "[\"cooling\"], \"secondary\": None}, \"technology\": [\"windows\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\":", "\"measure\": 1}}] def test_compete_res(self): \"\"\"Test outcomes given valid 
sample measures w/ point value", "\"measure\": {\"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\":", "3.631559]), \"2010\": numpy.array([ 1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])}, \"irr (w/ energy and carbon", "copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2]", "\"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}}, \"lifetime\":", "\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array( [5, 6, 7])}},", "19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}}, \"competed\": { \"baseline\": { \"2009\":", "valid inputs yield correct anpv, irr, payback, and # cost of conserved energy/carbon", "{ \"2009\": numpy.array([ 21.11183, 21.34227, 20.05334]), \"2010\": numpy.array([ 21.11183, 21.34227, 20.05334])}, \"efficient\": {", "if x is not None: self.assertAlmostEqual(function_output[ind], x, places=2) else: self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase):", "'measures_all' following competition and supply-demand overlap adjustments. measure_master_msegs_out_dist (dict): Master market microsegments that", "sample_measure2 (dict): Sample residential measure #2. sample_measure3 (dict): Sample commercial measure #1. 
\"\"\"", "\"efficient\": {\"2009\": 15, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "**x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]] # Instantiate", "}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}}, \"irr (w/ energy", "\"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {", "savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"],", "10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30},", "Attributes: handyvars (object): Useful variables across the class. test_adopt_scheme (string): Sample consumer adoption", "\"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([12, 10, 6])}, \"efficient\": { \"2009\":", "numpy.array([ 0.2008032, 0.1901141, 0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2 = [{ \"savings and portfolio metrics\":", "= consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist,", "numpy.pmt(1.0, 2, 0), \"rate 3\": numpy.pmt(0.45, 2, 0.1896552), \"rate 4\": numpy.pmt(0.25, 2, 0.3),", "1}, str(('primary', 'AIA_CZ2', 'multi family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\":", "1\": { \"nested key 1\": [0.5, 0.2, 0.3, 0.4, 0.5], \"nested key 2\":", "\"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\":", "-70, \"rate 7\": -75}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\":", "\"rate 7\": -120}}}}] # Adjust/finalize 
point value test measure consumer metrics for ind,", "{\"2009\": 5, \"2010\": 10}, \"measure\": {\"2009\": 5, \"2010\": 10}}}, \"energy\": { \"total\": {", "\"2009\": numpy.array([ 1.670251, 7.32767, 0.01445051]), \"2010\": numpy.array([ 1.670251, 7.32767, 0.01445051])}, \"efficient\": { \"2009\":", "self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary' and 'secondary_adj' functions. Verify that 'compete_com_primary'", "partition to a total energy or carbon market/savings value. Attributes: a_run (object): Sample", "30, \"2010\": 30}, \"measure\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"all\": {\"2009\": 15,", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\":", "numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07,", "{ \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_point_com = [{ \"savings and portfolio metrics\": {", "{ \"baseline\": {\"2009\": 22.22366, \"2010\": 22.22366}, \"efficient\": {\"2009\": 11.11183, \"2010\": 11.11183}}, \"competed\": {", "'ok_master_mseg_dist2' with a residential sample measure. ok_out_dist3 (dict): Measure attribute update status, savings,", "'single family home', 'existing']\": { \"total\": { yr: 10 for yr in cls.handyvars.aeo_years},", "data. sample_measure_com (object): Sample commercial measure data. 
test_adopt_scheme (string): Sample consumer adoption scheme.", "5, 3])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "{ \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 85, \"rate", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 17.77300, 10.22977, 19.98073]), \"2010\":", "numpy.array([8.89, 5.11, 9.99])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977,", "1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\": {", "31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}}, \"lifetime\": { \"baseline\": {\"2009\":", "2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.670251,", "5\": 90, \"rate 6\": 100, \"rate 7\": 110}, \"2010\": { \"rate 1\": 50,", "6, -0.1837021), numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654), None, None, None, 0.62, 1.59,", "6.943250, \"2010\": 6.943250}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17.77300, \"2010\":", "False, \"competed\": True}}, \"consumer metrics\": False}, { \"stock\": { \"cost savings (total)\": {", "it a sample 'uncompeted' # market ('ok_master_mseg_dist4'), the focus of this test suite", "\"rate 3\": numpy.pmt(0.45, 2, 0.5826397), \"rate 4\": numpy.pmt(0.25, 2, 0.72), \"rate 5\": numpy.pmt(0.15,", "competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def", "= run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = { \"market_entry_year\": None, \"market_exit_year\": None, \"markets\": { \"Technical", "0.33, 0.20, 0.20, 0.20]), \"2010\": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}] cls.ok_out_dist4 = [{", 
"\"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}}, \"irr (w/ energy costs)\": { \"2009\":", "measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test output given residential measure", "-0.08611353}, \"ccc\": {\"2009\": -1.602415e-08, \"2010\": -1.111353e-08}, \"ccc (w/ energy cost benefits)\": { \"2009\":", "\"2010\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4318182), \"rate 2\": numpy.pmt(1.0, 2, -0.125), \"rate", "numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]), \"2010\": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])}, \"payback (w/", "captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key1: { \"2009\": 0, \"2010\": 0}},", "measure_master_msegs_out (dict): Master market microsegments that should be generated for each Measure object", "\"2010\": numpy.array([ 0.3344482, 0.3194888, 0.3533569, 0.3472222, 0.3636364])}, \"payback (w/ energy and carbon costs)\":", "5.072499])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 39.06682, 40.94604, 30.43499]), \"2010\":", "numpy.array([ numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794),", "46, \"2010\": 44}, \"efficient\": {\"2009\": 34.5, \"2010\": 33}}, \"competed\": { \"baseline\": {\"2009\": 23,", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07,", "[\"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\":", "on sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master", "mseg keys and values\": { cls.adjust_key1: { \"stock\": { \"total\": { \"all\": {\"2009\":", "across the class. 
sample_measure (object): Sample measure data with lists to convert. \"\"\"", "-8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.pmt(0.07,", "{ \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}}, \"competed\": { \"baseline\": { \"2009\": 0,", "\"name\": \"sample measure 2\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\":", "6\": -70, \"rate 7\": -75}}}}, { \"stock cost\": { \"residential\": { \"2009\": None,", "numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\":", "\"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 22.22, \"2010\": 22.22}}, \"competed\": { \"all\":", "1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503,", "\"2010\": 5}, \"measure\": {\"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "45, \"2010\": 45}}}} def test_ok(self): \"\"\"Test for correct function output given valid inputs.\"\"\"", "# following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][", "\"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\":", "\"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"carbon\": { \"total\":", "# Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify test measure portfolio-level", "\"2010\": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}] cls.ok_out_dist4 = [{ \"savings and portfolio metrics\":", "'payback' function. 
Verify cashflow input generates expected payback output. Attributes: handyvars (object): Useful", "\"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\":", "input dict.\"\"\" for key in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test", "10.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])},", "measures with point value inputs. measures_secondary (list): Subset of 'measures_all' with secondary microsegments", "{ \"name\": \"sample measure 3 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\":", "0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test output given residential measure with array inputs.\"\"\" #", "status, savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist4' with", "13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\": { \"total\":", "'existing'))]]} cls.measures_overlap2_dist = { \"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity", "\"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\": { \"2009\":", "numpy.array([10, 12, 14])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\":", "(w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\":", "as a numpy array # (for input uncertainty test cases) elif isinstance(i, numpy.ndarray):", "30, \"2010\": 30}, \"measure\": {\"2009\": 23, \"2010\": 22}}, \"competed\": { \"all\": {\"2009\": 15,", "self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, 
self.ok_csave, self.ok_ccostsave) # Test that valid inputs yield correct anpv,", "cls.overlap_key: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "{ \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}}, \"competed\": { \"baseline\": { \"2009\": 25.5,", "23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}, \"competed\": { \"baseline\": {\"2009\": 11.5,", "given 'ok_master_mseg_dist1' with a residential sample measure. ok_out_dist2 (dict): Measure attribute update status,", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455,", "tested. secnd_adj_key (string): Key used to link primary and secondary market microsegments (by", "0.865895571, 0.01085301, 6.722325]), \"2010\": numpy.array([ 0.865895571, 0.01085301, 6.722325])}}, \"competed\": { \"baseline\": { \"2009\":", "[\"heating\", \"secondary heating\", \"cooling\"]}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": \"demand\"},", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001},", "import itertools import os class CommonTestMeasures(object): \"\"\"Class of common sample measures for tests.", "**self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using test measure, run", "across the class. test_adopt_scheme (string): Sample consumer adoption scheme. test_htcl_adj (dict): Sample dict", "'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist = { \"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary',", "measure. ok_out_dist2 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that", "a_run (object): Analysis engine object incorporating all 'measures_all' objects. 
measures_all_dist (list): List including", "\"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -400, \"2010\": -400}, \"commercial\": {", "with point value inputs. measures_secondary (list): Subset of 'measures_all' with secondary microsegments to", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"cost\": { \"stock\":", "# following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][", "4.897553, 4.260683, 4.367373, 4.089454])}, \"payback (w/ energy costs)\": { \"2009\": numpy.array([ 0.2392344, 0.2347418,", "34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}},", "{ \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07, 2,", "Import needed packages import unittest import numpy import copy import itertools import os", "adoption scheme. test_htcl_adj (dict): Sample dict with supply-demand overlap data. 
adjust_key1 (string): First", "cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4 = { \"name\": \"sample", "{ \"2009\": 8.5, \"2010\": numpy.array([6, 6.5, 8])}}, \"competed\": { \"baseline\": { \"2009\": 8.5,", "{ \"total\": { \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\": {\"2009\": 15, \"2010\": 25}},", "{\"2009\": 0, \"2010\": 16}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "{ \"baseline\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])},", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) #", "0.1), \"rate 5\": numpy.pmt(0.15, 2, 0.1521739), \"rate 6\": numpy.pmt(0.065, 2, 0.2042254), \"rate 7\":", "numpy.array([ 41.65950, 30.34466, 44.97110])}, \"efficient\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([", "-7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08, -8.216772e-08, -7.592937e-08])}}, { \"anpv\": { \"stock", "point value test measure # consumer metrics consumer_metrics_final = [{ \"stock cost\": {", "{ \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])},", "5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array([0, 1, 2]), \"2010\": numpy.array([0, 1, 2])}}},", "numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\":", "16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\": { \"total\": {", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.670251, 7.32767, 0.01445051]), \"2010\": numpy.array([ 
1.670251, 7.32767,", "2, 0.67, 0.005, -0.13, 7.7e-10, -9.2e-9] def test_metric_updates(self): \"\"\"Test for correct outputs given", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07, 2, 0.5245794),", "energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3_dist = { \"name\": \"sample", "{ \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}}, \"competed\": {", "\"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 20}}, \"competed\":", "(competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure2 = { \"name\": \"sample measure", "\"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -135,", "1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30},", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\":", "self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) #", "\"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"carbon\": { \"total\":", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) #", "\"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}}, \"competed\": { \"baseline\":", "\"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\":", "\"savings\": { cls.adjust_key1: { \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key1: { \"2009\":", "\"cost\": 
{ \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 22.22366, 22.68455, 20.10668]),", "= \"Max adoption potential\" cls.overlap_key = str( ('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'lighting',", "CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life = 3 cls.ok_product_lifetime = 6.2 cls.ok_life_ratio =", "and carbon costs)\": { \"2009\": numpy.array([ 0.1937984, 0.1879699, 0.1748252, 0.2840909, 0.1724138]), \"2010\": numpy.array([", "test_metrics_ok_distrib4(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize test measure", "savings. ok_csave (int): Sample measure avoided carbon emissions. ok_ccostsave (int): Sample measure avoided", "\"2010\": numpy.array([ -2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}}, { \"anpv\": { \"stock cost\": {", "-8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07, -9.855809e-08])}}, { \"anpv\": {", "measure 3 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None,", "CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max adoption potential' cls.ok_rate = 0.07 cls.ok_master_mseg_point", "13.3, 13.8, 12.5])}}, \"carbon\": { \"savings (total)\": { \"2009\": numpy.array([149.4, 142.3, 141.9, 150.0,", "\"rate 3\": -70, \"rate 4\": -380, \"rate 5\": -390, \"rate 6\": -150, \"rate", "if one of the dicts # is empty, is missing section(s), or has", "adjustments. 
measure_master_msegs_out_dist (dict): Master market microsegments that should be generated for each Measure", "\"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\":", "6\": numpy.pmt(0.065, 2, 0.9103132), \"rate 7\": -0.5}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2,", "\"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2,", "\"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}},", "dict2 = self.ok_out self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the", "\"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\":", "6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "\"rate 7\": 110}, \"2010\": { \"rate 1\": 50, \"rate 2\": 60, \"rate 3\":", "\"rate distribution\": {}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total", "{ \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 23, \"2010\": 22}}, \"competed\": {", "100, \"2010\": 150}, \"efficient\": {\"2009\": 50, \"2010\": 100}}}, \"cost\": { \"stock\": { \"total\":", "compete measure r3 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"],", "numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_com(self):", "competed master microsegments for each sample measure # following competition/secondary microsegment adjustments for", "2, 0.1521739), \"rate 6\": numpy.pmt(0.065, 2, 0.2042254), \"rate 7\": -0.125}}}, \"energy cost\": {", "savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, 
{ \"cce\": { \"2009\": numpy.array([ -0.01565543, -0.02450490,", "self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0])", "\"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 30, \"2010\": 30}}, \"competed\":", "9.60]), \"2010\": numpy.array([1.73, 0.02, 9.60])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\":", "{ \"2009\": numpy.array( [0, 1, 2]), \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": {", "-0.51), numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 5, 2.837211)]),", "\"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 0, \"2010\":", "'demand', 'lighting gain', 'existing')) cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1 = { \"name\":", "\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\":", "0.33, 0.22, 0.22, 0.22])}}] cls.ok_out_dist4 = [{ \"savings and portfolio metrics\": { \"Technical", ".35}}, \"Commercial\": { \"Heating\": {\"2009\": .40, \"2010\": .40}, \"Cooling\": {\"2009\": .45, \"2010\": .45}}}}", "[ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}, cls.overlap_key_scnd: { \"rate distribution\": {}}},", "{\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\":", "None, \"yrs_on_mkt\": [\"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\":", "self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted( 
engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113,", "\"2010\": 30}, \"efficient\": { \"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\":", "2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2,", "{ cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\":", "numpy array # (for input uncertainty test cases) elif isinstance(i, numpy.ndarray): self.assertTrue(type(i) ==", "{\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 22.22, \"2010\": 22.22}}, \"competed\": { \"all\": {\"2009\":", "\"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key2: { \"stock\": { \"total\":", "20}, \"efficient\": { \"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10,", "point value inputs.\"\"\" # Run the measure competition routine on sample demand-side measures", "self.dict_check(i, i2) # At the terminal/leaf node, formatted as a numpy array #", "Sample residential measure data. sample_measure_com (object): Sample commercial measure data. test_adopt_scheme (string): Sample", "for correct function output given valid input.\"\"\" # Instantiate measure measure_instance = run.Measure(self.handyvars,", "= \\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x, y) for x, y in zip([ tested_data[\"key 1\"][\"nested", "{\"2009\": .45, \"2010\": .45}}}} cls.ok_out = { \"AIA CZ1\": { \"Residential\": { \"Heating\":", "operation of the 'calc_savings_metrics' function. 
Verify that measure master microsegment inputs yield expected", "{ \"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 2.59768671,", "31.66775, 32.01341, 30.08001])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592,", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"all\":", "\"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]),", "= run.Engine(self.handyvars, self.measure_list) # Test that valid input cashflows yield correct output payback", "\"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\":", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 36}, \"efficient\": {\"2009\": 0, \"2010\":", "\"efficient\": {\"2009\": 8.886499, \"2010\": 8.886499}}, \"competed\": { \"baseline\": {\"2009\": 8.886499, \"2010\": 8.886499}, \"efficient\":", "and demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) #", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}}, \"energy\": { \"total\":", "-200, -100])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": {", "{ \"baseline\": { \"2009\": numpy.array([ 21.11183, 21.34227, 20.05334]), \"2010\": numpy.array([ 21.11183, 21.34227, 20.05334])},", "cashflow input generates expected payback output. 
Attributes: handyvars (object): Useful variables across the", "\"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 45, \"2010\": 45}}, \"competed\":", "'existing')) cls.adjust_key2 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply',", "use across all class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure", "9.99]), \"2010\": numpy.array([8.89, 5.11, 9.99])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array( [20, 21, 22]), \"2010\": numpy.array(", "= [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "-120}, \"2010\": { \"rate 1\": -90, \"rate 2\": -95, \"rate 3\": -100, \"rate", "numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"carbon\": { \"total\": {", "= os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset aeo_years cls.handyvars.aeo_years = [\"2009\", \"2010\"]", "10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 60,", "1\": -40, \"rate 2\": -50, \"rate 3\": -55, \"rate 4\": -60, \"rate 5\":", "[[-10, 1, 1, 1, 1, 5, 7, 8], [-10, 14, 2, 3, 4],", "residential measure data. sample_measure_com (object): Sample commercial measure data. 
test_adopt_scheme (string): Sample consumer", "\"baseline\": { \"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": { \"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\":", "\"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 85,", "6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": {", "\"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'multi family home', 'electricity (grid)', 'lighting',", "\"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\":", "convert. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\"", "15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}},", "11.0, 10.5])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 23, \"2010\":", "0.001808835, 1.920664]), \"2010\": numpy.array([ 0, 0.001808835, 1.920664])}}}, \"energy\": { \"total\": { \"baseline\": {", "2\"], tested_data[\"key 2\"]], [numpy.ndarray, int, float])])) # Offer external code execution (include all", "microsegments that should be generated for each Measure object in 'measures_all' following competition", "[-100, 0, 1]] cls.ok_out = [5.14, 0.71, 6.5, 0, 999] def test_cashflow_paybacks(self): \"\"\"Test", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15,", "{\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([16, 15, 13])}}, \"competed\":", "\"2009\": numpy.array([ 2.59768671, 0.02713253, 14.40498233]), \"2010\": numpy.array([ 2.59768671, 0.02713253, 14.40498233])}, \"efficient\": { \"2009\":", "to # the dicts or unitary values that are found in i and", "savings (total)\": {\"2009\": 10, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 10, \"2010\": 15}},", "numpy.array([ 8.022273, 8.648681, 
5.144998])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0,", "(competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}, \"Max", "\"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([17.77,", "\"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 1, 0.9345794),", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\":", "\"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\":", "10.55592}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 22.22366, \"2010\": 22.22366}, \"efficient\":", "captured)\": {} }}}, \"mseg_out_break\": {}}}} class CommonMethods(object): \"\"\"Define common methods for use in", "\"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "\"\"\"Define common methods for use in all tests below.\"\"\" def dict_check(self, dict1, dict2):", "34.5, \"2010\": numpy.array([33, 33, 31.5])}}, \"competed\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22,", "self.ok_master_mseg_dist1 # Create Engine instance using test measure, run function on it engine_instance", "of running all test fixtures in the file.\"\"\" unittest.main() if __name__ == \"__main__\":", "{\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2,", "[1, 2, 3, 4, 5], \"nested key 2\": 5}, \"key 2\": 10.8}, \"Max", "'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family", "\"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}, \"efficient\": { \"2009\":", "10, \"2010\": 10}, \"measure\": { 
\"2009\": 0, \"2010\": numpy.array([8.0, 7.5, 6.5])}}}, \"energy\": {", "\"rate 3\": 115, \"rate 4\": 120, \"rate 5\": 125, \"rate 6\": 10, \"rate", "3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\":", "173, 169, 194, 149]), \"2010\": numpy.array([194, 205, 219, 289, 176])}, \"savings (annual)\": {", "8.022273, \"2010\": 8.022273}}, \"competed\": { \"baseline\": {\"2009\": 8.022273, \"2010\": 8.022273}, \"efficient\": {\"2009\": 0,", "\"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array([15,", "\"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 12}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"carbon\":", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 39.06682, 40.94604, 30.43499]), \"2010\": numpy.array([ 39.06682,", "0.01356626, 7.20249116])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([", "base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars,", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341,", "\"2010\": numpy.array([6, 6.5, 8])}}, \"competed\": { \"baseline\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5,", "{}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure2 = {", "microsegment key chain being tested. 
compete_meas1 (dict): Sample residential demand-side cooling measure 1.", "[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run_dist =", "0.1, 0.4]}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\":", "('ok_master_mseg_point'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"]", "measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist4'), the focus of", "\"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 12}, \"efficient\": {\"2009\": 0, \"2010\":", "[cls.measures_all[1]] # Instantiate engine object based on above measures cls.a_run = run.Engine(cls.handyvars, cls.measures_all)", "self.ok_out_dist2[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test", "Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test output given", "# heating and cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master", "savings\": { yr: 5 for yr in cls.handyvars.aeo_years}}, }} cls.compete_meas1 = { \"name\":", "22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 69,", "{\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "2 cls.ok_base_scost = 1 cls.ok_meas_sdelt = -1 cls.ok_esave = 7.5 cls.ok_ecostsave = 0.5", "[\"F32T8\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing", 
"\"2010\": 20}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\":", "4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370}}}, \"carbon cost\":", "30}, \"Cooling\": {\"2009\": 35, \"2010\": 35}}, \"Commercial\": { \"Heating\": {\"2009\": 40, \"2010\": 40},", "inputs. measures_demand (list): Demand-side subset of 'measures_all'. measures_supply (list): Supply-side subset of 'measures_all'.", "\"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}, \"competed\": { \"baseline\":", "-0.01897398, -0.01418052]), \"2010\": numpy.array([ -0.02466428, -0.02853592, -0.02023954, -0.02715319, -0.02355809])}, \"cce (w/ carbon cost", "the engine \"\"\" # Import code to be tested import run # Import", "(dict): Sample measure master microsegment including stock cost and measure lifetime array. ok_out_point_res", "\"2009\": numpy.array([ -0.04898876, -0.05783823, -0.05267604, -0.05230731, -0.04751385]), \"2010\": numpy.array([ -0.09966428, -0.10353592, -0.09523954, -0.10215319,", "14.65534, 0.02890102]), \"2010\": numpy.array([ 3.340502, 14.65534, 0.02890102])}, \"efficient\": { \"2009\": numpy.array([ 2.227001, 10.25874,", "Attributes: handyvars (object): Useful variables across the class. 
sample_measure_res (object): Sample residential measure", "None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 1.808018), \"2010\": numpy.pmt(0.07, 2,", "3\": 95, \"rate 4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\":", "Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self): \"\"\"Test output given", "7])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg", "{\"2009\": 20, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60},", "\"cost savings (total)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3,", "0, \"2010\": 0}}, \"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}},", "= measure_instance.__dict__ def test_attributes(self): \"\"\"Compare object attributes to keys from input dict.\"\"\" for", "to a total energy or carbon market/savings value. 
Attributes: a_run (object): Sample analysis", "run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.handyvars.retro_rate = 0 cls.test_adopt_scheme = \"Max adoption potential\"", "str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1 = { \"name\": \"sample compete measure c1\", \"climate_zone\": [\"AIA_CZ1\"],", "for x in range(0, len(i)): self.assertAlmostEqual(i[x], i2[x], places=2) # At the terminal/leaf node,", "44}, \"efficient\": {\"2009\": 34.5, \"2010\": 33}}, \"competed\": { \"baseline\": {\"2009\": 23, \"2010\": 22},", "adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\":", "{ \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.51),", "{ \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array( [5, 6,", "\"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}, \"competed\": { \"baseline\": {\"2009\": 8.5, \"2010\":", "savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"],", "\"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "\"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector (LED)\"],", "(w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\":", "{ \"key 1\": { \"nested key 1\": [0.5, 0.2, 0.3, 0.4, 0.5], \"nested", "measure markets/savings/portfolio metrics for adopt_scheme in self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), 
self.ok_savings_mkts_comp_schemes) #", "[run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand =", "2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "\"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"cost\": { \"stock\":", "commercial supply-side lighting measure 3. compete_meas_dist (dict): Alternative version of sample commercial supply-side", "results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"],", "\"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\": {", "numpy.array([20, 21, 22]), \"2010\": numpy.array([20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\":", "= str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))", "energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}},", "-155, \"rate 6\": -160, \"rate 7\": -170}}}}, { \"stock cost\": { \"residential\": {", "22.68, 20.11]), \"2010\": numpy.array([22.22, 22.68, 20.11])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15},", "{ \"baseline\": { \"2009\": 0, \"2010\": numpy.array([12, 10, 6])}, \"efficient\": { \"2009\": 0,", "measure 2\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\":", "= self.ok_master_mseg_dist4 # Create Engine instance using test measure, run function on it", "{\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": 
{\"2009\": 10.55592, \"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across the supply and", "1.139051), numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07, 2, 2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07,", "self.ok_out_dist1[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test", "on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # For first test", "# Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify test", "including lists of stock cost input values instead of point values. compete_meas4 (dict):", "1.670251, 7.816181, 0.01637724])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\":", "measure #2. sample_measure3 (dict): Sample commercial measure #1. \"\"\" def __init__(self): self.sample_measure =", "\"2010\": numpy.array([ -0.047715000, -0.05520500, -0.09523954, -0.10215319, -0.13025120])}, \"ccc\": { \"2009\": numpy.array([ 3.6380e-08, 1.9260e-08,", "\"efficient\": {\"2009\": 25, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\":", "List of competing measures including some measures with array inputs. 
measures_secondary_dist (list): Subset", "\"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "{ \"2009\": numpy.array([ 1.670251, 7.32767, 0.01445051]), \"2010\": numpy.array([ 1.670251, 7.32767, 0.01445051])}, \"efficient\": {", "\"rate 3\": -190, \"rate 4\": -205, \"rate 5\": -180, \"rate 6\": -230, \"rate", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\":", "\"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist3 = { \"stock\":", "numpy.array([18, 15, 9])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([12, 10, 6])},", "to finalize point value test measure # consumer metrics consumer_metrics_final = [{ \"stock", "competing measures with point value inputs. measures_secondary (list): Subset of 'measures_all' with secondary", "\"2010\": 40}, \"efficient\": {\"2009\": 40, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\":", "0.33, 0.33, 0.33])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([0.33, 0.33, 0.20,", "{ \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}, \"efficient\": {", "\"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\":", "None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\":", "None, \"2010\": None}}}] # Adjust/finalize point value test measure consumer metrics for ind,", "that should be generated given 'ok_master_mseg_point' with a residential sample measure. 
ok_out_dist1 (dict):", "run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows = [[-10, 1, 1,", "energy/carbon outputs for ind, x in enumerate(self.ok_out_array): if x is not None: self.assertAlmostEqual(function_output[ind],", "cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample", "{ \"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply',", "23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\":", "measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist3'), the focus of", "8.648681, 5.144998])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\":", "\"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\": {", "\"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2,", "of comparable structure # to the normal output from zip_longest() fill_val = ('substituted", "\"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary mseg adjustments\": { \"market share\":", "{ \"name\": \"sample compete measure r1 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"],", "dict): # Test that the dicts from the current keys are equal self.assertCountEqual(i,", "0.1, 0.4]}}, cls.overlap_key_scnd: { \"rate distribution\": {}}}, \"secondary mseg adjustments\": { \"market share\":", "6\": numpy.pmt(0.065, 2, 1.820626), \"rate 7\": -1}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2,", "for each set of sample cash flows. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables", "\"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"cost\": { \"stock\":", "\"cost savings (total)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7,", "\"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": numpy.array([22.22,", "numpy.array([ 0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\":", "36}, \"efficient\": {\"2009\": 34, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\": 18},", "4\": -105, \"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120}, \"2010\": {", "0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes", "\"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 120, \"2010\": 120}, \"commercial\":", "measure consumer metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist =", "{\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24},", "\"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\":", "{ \"2009\": -150, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock", "= CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life = 3 cls.ok_product_lifetime = 6.2 cls.ok_life_ratio", "{}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": {", "1 including lists of stock cost input values instead of point values. 
compete_meas4", "\"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": { \"Technical", "carbon costs. ok_out_dicts (list): Output annuity equivalent Net Present Value dicts that should", "ok_base_scost (int): Sample baseline stock cost. ok_scostsave (int): Sample baseline->measure stock cost delta.", "a point value else: self.assertAlmostEqual(i, i2, places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure attributes", "numpy.array([ { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95, \"rate 4\":", "self.ok_out_point_com[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) # Verify test measure", "lighting measure 1 including lists stock cost input values instead of point values.", "Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) # Verify test measure", "0, \"2010\": numpy.array([6, 5, 3])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "cls.ok_master_mseg_dist3 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\":", "markets attribute for adopt_scheme in self.handyvars.adopt_schemes: for comp_scheme in [\"uncompeted\", \"competed\"]: tested_data =", "from the current keys are equal self.assertCountEqual(i, i2) # Continue to recursively traverse", "dictionary to be compared dict2 (dict): Second dictionary to be compared Raises: AssertionError:", "\"measure\": 1}, \"sub-market scaling\": 1}}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting',", "missing section(s), or has different key names self.assertEqual(k, k2) # If the recursion", "-145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -170}}}},", "\"rate 6\": 110, \"rate 7\": 115}, { \"rate 1\": 205, \"rate 2\": 100,", 
"that overlap with 'measures_demand' Measure objects. measure_master_msegs_out (dict): Master market microsegments that should", "numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44,", "dict that has missing content; this # value is given as a tuple", "{\"2009\": 15, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\",", "self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2])", "\"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "[{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\":", "\"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6, 6.5,", "\"2009\": 0.25, \"2010\": 0.33}, \"payback (w/ energy and carbon costs)\": { \"2009\": 0.2,", "\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\":", "[run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]]", "numpy.array([ 63.33550, 64.02682, 60.16002]), \"2010\": numpy.array([ 63.33550, 64.02682, 60.16002])}, \"efficient\": { \"2009\": numpy.array([", "supply-side market microsegment key chain being tested. 
compete_meas1 (dict): Sample residential demand-side cooling", "\"2009\": numpy.array([16, 27, 31, 6, 51]), \"2010\": numpy.array([106, 95, 81, 11, 124])}}, \"competed\":", "{ \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 2,", "{ \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775, \"2010\": 31.66775}},", "numpy.array([1.11, 4.89, 0.01]), \"2010\": numpy.array([1.11, 4.89, 0.01])}}}, \"energy\": { \"total\": { \"baseline\": {", "10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 30,", "5, \"2010\": 5}, \"measure\": {\"2009\": 1.11, \"2010\": 1.11}}}, \"energy\": { \"total\": { \"baseline\":", "0.1800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes =", "27.77300}}, \"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}},", "0.6645794), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 2, 0.3845794)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "{ \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}}, \"lifetime\": {\"baseline\":", "\"2010\": .30}, \"Cooling\": {\"2009\": .35, \"2010\": .35}}, \"Commercial\": { \"Heating\": {\"2009\": .40, \"2010\":", "42.68455, 40.10668])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([", "\"carbon\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 50, \"2010\":", "values\": { cls.adjust_key1: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10},", "self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test output given 
residential measure with array inputs.\"\"\"", "3.956335, 3.180956, 2.886001]), \"2010\": numpy.array([ 2.425032, 2.584709, 2.240438, 2.298386, 2.147181])}, \"irr (w/ energy", "\"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"carbon\": { \"total\": { \"baseline\":", "19.98073])}, \"efficient\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}},", "valid sample inputs. ok_out_array (list): Other financial metric values that should be generated", "0, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\":", "ok_product_lifetime (float): Sample measure lifetime. ok_life_ratio (int): Sample measure->baseline lifetime ratio. ok_base_scost (int):", "energy cost benefits)\": { \"2009\": numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([", "(list): Demand-side subset of 'measures_all_dist'. measures_supply_dist (list): Supply-side subset of 'measures_all_dist'. 
measures_overlap1_dist (dict):", "-0.05230731, -0.07946463]), \"2010\": numpy.array([ -0.047715000, -0.05520500, -0.09523954, -0.10215319, -0.13025120])}, \"ccc\": { \"2009\": numpy.array([", "\"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "# Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test output", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17.77, \"2010\": 17.77}}, \"competed\": {", "{\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "15}}}, { \"cce\": { \"2009\": numpy.array([ 0.036380, 0.019260, -0.01934271, -0.01897398, -0.04613129]), \"2010\": numpy.array([", "version of sample residential supply-side cooling measure 1 including lists of stock cost", "= { \"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling',", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 100, \"rate 2\": 110,", "\"rate 2\": numpy.pmt(1.0, 2, 0.75), \"rate 3\": numpy.pmt(0.45, 2, 1.165279), \"rate 4\": numpy.pmt(0.25,", "\"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -350,", "3.340502, \"2010\": 3.340502}, \"efficient\": {\"2009\": 2.227001, \"2010\": 2.227001}}, \"competed\": { \"baseline\": {\"2009\": 1.670251,", "\"rate 4\": -205, \"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200}}}, \"carbon", "inputs generate expected prioritization metric outputs. 
Attributes: handyvars (object): Useful variables across the", "7\": -170}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "\"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": numpy.array([ 0,", "0.2466667, 0.2233333, 0.14, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.34,", "5.00]), \"2010\": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([0.50,", "\"baseline\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}, \"efficient\":", "\"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "0.01085301, 6.722325]), \"2010\": numpy.array([ 0.865895571, 0.01085301, 6.722325])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "\"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\":", "'demand', 'windows', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows',", "\"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"carbon\": { \"total\":", "5, 7, 8], [-10, 14, 2, 3, 4], [-10, 0, 1, 2], [10,", "the items # identified, where in the case of a dict, the first", "run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset aeo_years cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com", "\"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.11, \"2010\": 11.11}}}, \"energy\": { \"total\":", "{ \"2009\": numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]), \"2010\": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])},", "numpy.array([ 0.003046667, -0.01407333, -0.05267604, -0.05230731, -0.07946463]), \"2010\": numpy.array([ 
-0.047715000, -0.05520500, -0.09523954, -0.10215319, -0.13025120])},", "on sample measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master", "\"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\":", "18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1,", "\"competed\": { \"baseline\": { \"2009\": 5, \"2010\": numpy.array([8.0, 7.5, 6.5])}, \"efficient\": { \"2009\":", "\"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\":", "engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0])", "{\"2009\": 5, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "4.50])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]), \"2010\": numpy.array([0.67,", "{ \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 5, \"2010\": 5}}}, \"energy\": {", "0}}, \"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption", "22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}, \"competed\": { \"baseline\": {\"2009\": 11.5, \"2010\": 11},", "numpy.array([ 0.036380, 0.019260, -0.01934271, -0.01897398, -0.04613129]), \"2010\": numpy.array([ 0.027285, 0.019795, -0.02023954, -0.02715319, -0.05525120])},", "26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}}, \"competed\": { \"baseline\": { \"2009\":", "{ \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"cost\": {", "= os.getcwd() handyvars = run.UsefulVars(base_dir, 
run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars, **cls.sample_measure)", "def test_cashflow_paybacks(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" # Create an Engine", "\"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": {", "{ \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 40, \"2010\": 40}},", "{ \"baseline\": {\"2009\": 0, \"2010\": 36}, \"efficient\": {\"2009\": 0, \"2010\": 24}}, \"competed\": {", "\"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]),", "energy (competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key1: { \"2009\":", "\"measure\": { \"2009\": numpy.array([8.02, 8.65, 5.14]), \"2010\": numpy.array([8.02, 8.65, 5.14])}}}, \"energy\": { \"total\":", "AC\"], \"secondary\": [\"general service (LED)\"]}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\":", "\"2010\": { \"rate 1\": -190, \"rate 2\": -195, \"rate 3\": -190, \"rate 4\":", "unitary values that are found in i and i2, # respectively, at the", "\"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775, \"2010\": 31.66775}}, \"competed\":", "\"2010\": 8.886499}}, \"competed\": { \"baseline\": {\"2009\": 8.886499, \"2010\": 8.886499}, \"efficient\": {\"2009\": 0, \"2010\":", "energy and carbon costs)\": {\"2009\": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]), \"2010\": numpy.array([2.00, 2.00,", "40.94604, 30.43499]), \"2010\": numpy.array([ 39.06682, 40.94604, 30.43499])}, \"efficient\": { \"2009\": numpy.array([ 26.04455, 27.29736,", "in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars, 
cls.measures_all_dist) #", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5,", "6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"carbon\": { \"total\": { \"baseline\":", "parameters\": {}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\":", "1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.adjust_key2:", "-380, \"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400}}}, \"carbon cost\": {", "8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34,", "including energy, carbon, and energy/carbon cost arrays. ok_master_mseg_dist2 (dict): Sample measure master microsegment", "11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\":", "def dict_check(self, dict1, dict2): \"\"\"Check the equality of two dicts. Args: dict1 (dict):", "all 'measures_all' objects. measures_all_dist (list): List including competing/interacting sample Measure objects with array", "-8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([ -2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}}, { \"anpv\": {", "{ yr: 5 for yr in cls.handyvars.aeo_years}}, }} cls.compete_meas1 = { \"name\": \"sample", "6, 7]), \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5,", "adjust. a_run (object): Analysis engine object incorporating all 'measures_primary' objects. 
measures_all_dist (list): List", "17.77300, \"2010\": 17.77300}, \"efficient\": {\"2009\": 8.886499, \"2010\": 8.886499}}, \"competed\": { \"baseline\": {\"2009\": 8.886499,", "be generated for each Measure object in 'measures_all_dist' following competition and supply-demand overlap", "1, 0.9345794), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 5, 4.100197)]), \"2010\": numpy.array([", "scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key1: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95},", "CommonMethods): \"\"\"Test 'compete_res_primary,' and 'htcl_adj'. Verify that 'compete_res_primary' correctly calculates primary market shares", "6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 16}, \"efficient\":", "\"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\":", "applies a climate zone/building type/end use partition to a total energy or carbon", "2, 0.9103132), \"rate 7\": -0.5}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate", "inputs.\"\"\" # Run measure competition routine on sample measures self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme)", "dict2 (dict): Second dictionary to be compared Raises: AssertionError: If dictionaries are not", "with point value inputs. measures_demand (list): Demand-side subset of 'measures_all'. 
measures_supply (list): Supply-side", "20.05334])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}},", "[cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to finalize array test", "\"2010\": numpy.array([ 17.77300, 10.22977, 19.98073])}, \"efficient\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\":", "20, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": {", "numpy.array([ -2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\":", "None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None},", "\"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\":", "{\"2009\": 15, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\":", "1, -0.51), numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 5,", "}, \"mseg_out_break\": {}}}} cls.compete_meas3_dist = { \"name\": \"sample compete measure r3 dist\", \"climate_zone\":", "{\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"cost\": { \"stock\": {", "point value inputs.\"\"\" # Run measure competition routine on sample measures self.a_run.compete_com_primary( self.measures_all,", "2}, \"key 2\": 5.8}}} def test_numpy_convert(self): \"\"\"Test for correct function output given valid", "9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]),", "{}}}} cls.compete_meas3 = { \"name\": \"sample compete measure c3\", \"climate_zone\": 
[\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"],", "17.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9,", "outcomes given valid sample measures w/ some array inputs.\"\"\" # Run the measure", "class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years", "11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}},", "test_htcl_adj (dict): Sample dict with supply-demand overlap data. adjust_key1 (string): First sample string", "2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy", "-5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\":", "engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0])", "5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "{ \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array(", "\"uncompeted\": True, \"competed\": True}, \"Max adoption potential\": { \"uncompeted\": False, \"competed\": True}}, \"consumer", "and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas1_dist = { \"name\": \"sample compete measure", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018),", "\"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\":", "a residential sample measure. 
ok_out_point_com (dict): Measure attribute update status, savings, and portfolio/consumer-level", "\"efficient\": { \"2009\": numpy.array([ 0.865895571, 0.01085301, 6.722325]), \"2010\": numpy.array([ 0.865895571, 0.01085301, 6.722325])}}, \"competed\":", "\"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "6\": -160, \"rate 7\": -170}, \"2010\": { \"rate 1\": -135, \"rate 2\": -140,", "{ \"2009\": numpy.array([ 2.59768671, 0.02713253, 14.40498233]), \"2010\": numpy.array([ 2.59768671, 0.02713253, 14.40498233])}, \"efficient\": {", "consumer adoption scheme. ok_rate (float): Sample discount rate. ok_master_mseg_point (dict): Sample measure master", "{ \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\":", "\"efficient\": {\"2009\": 0, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "\"2010\": 15}}}, { \"cce\": {\"2009\": -0.01602415, \"2010\": -0.01111353}, \"cce (w/ carbon cost benefits)\":", "5}, \"efficient\": { \"2009\": numpy.array([0, 1, 2]), \"2010\": numpy.array([0, 1, 2])}}}, \"energy\": {", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([2.23, 9.77, 0.02]), \"2010\": numpy.array([2.23,", "'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist", "{\"2009\": 5, \"2010\": 8}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": {", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\": 0, \"2010\":", "{ \"baseline\": { \"2009\": numpy.array([ 41.65950, 30.34466, 44.97110]), \"2010\": numpy.array([ 41.65950, 30.34466, 44.97110])},", "m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": {", "15, \"2010\": 
15}}, \"Commercial\": { \"Heating\": {\"2009\": 20, \"2010\": 20}, \"Cooling\": {\"2009\": 25,", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10,", "\"2010\": 0}}, \"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max", "\"rate 3\": -55, \"rate 4\": -60, \"rate 5\": -65, \"rate 6\": -70, \"rate", "100, 90])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": {", "{ \"2009\": numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08,", "microsegments for a series of competing commercial measures; and that 'secondary_adj' correctly adjusts", "0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test output given residential", "\"2010\": numpy.array([ 13.02227, 13.64868, 10.14500])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\":", "\"mseg_out_break\": {}}}} self.sample_measure4 = { \"name\": \"sample measure 4\", \"active\": 1, \"market_entry_year\": None,", "\"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5,", "0.9040091), numpy.pmt(0.07, 2, 0.9040091)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "# Continue to recursively traverse the dict self.dict_check(i, i2) # At the terminal/leaf", "{\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341, \"2010\": 19.53341}}, \"competed\": { \"baseline\": {\"2009\":", "\"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 69, \"2010\":", "32.01341, 30.08001])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 21.11183, 21.34227, 20.05334]), \"2010\": numpy.array([", 
"\"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 8}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\":", "-50, \"rate 3\": -55, \"rate 4\": -60, \"rate 5\": -65, \"rate 6\": -70,", "\"2010\": 2.44}, \"irr (w/ energy and carbon costs)\": { \"2009\": 4.54, \"2010\": 4.09},", "100} cls.ok_partitions = { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": .10, \"2010\":", "{ \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 40, \"2010\": 40}}, \"competed\": {", "cls.ok_partitions = { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": .10, \"2010\": .10},", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1,", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\":", "\"baseline\": {\"2009\": 21.11183, \"2010\": 21.11183}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"carbon\": { \"total\":", "microsegment keys that overlap with 'measures_demand' Measure objects. measures_overlap2 (dict): List of demand-side", "{ \"residential\": { \"2009\": -400, \"2010\": -400}, \"commercial\": { \"2009\": None, \"2010\": None}},", "and associated cost input values instead of point values. 
compete_meas2 (dict): Sample residential", "cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 1.808018), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\":", "20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 16}}, \"competed\": { \"all\": {\"2009\": 10,", "dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\":", "and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_point' with a residential", "22.22, \"2010\": 22.22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.11,", "142.3, 141.9, 150.0, 148.9]), \"2010\": numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])}, \"savings (annual)\": {", "13.8, 12.5])}}, \"carbon\": { \"savings (total)\": { \"2009\": numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]),", "in range(0, len(i)): self.assertAlmostEqual(i[x], i2[x], places=2) # At the terminal/leaf node, formatted as", "run function on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # For", "secondary market microsegments (by climate, building type, structure type). 
compete_meas1 (dict): Sample commercial", "\"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed", "= { \"name\": \"sample compete measure r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"],", "{ \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"cost\": {", "\"2009\": numpy.array( [15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\":", "{\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([10, 12, 14])}}, \"competed\":", "\"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "6, 4.76654), None, None, None, 0.62, 1.59, 2, 0.67, 0.005, -0.13, 7.7e-10, -9.2e-9]", "{ \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2,", "{\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}}, \"carbon\": { \"total\": {", "\"2009\": { \"rate 1\": -40, \"rate 2\": -50, \"rate 3\": -55, \"rate 4\":", "\"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "1}, \"measure\": 2}} cls.ok_master_mseg_dist1 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "\"total\": { \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\": {\"2009\": 15, \"2010\": 25}}, \"competed\":", "test_metric_updates(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" # Create an Engine instance", "{}}}} cls.compete_meas3_dist = { \"name\": \"sample compete measure r3 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "\"efficient\": {\"2009\": 60, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\":", "5, \"2010\": 5}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "5, \"2010\": 10}}}, \"energy\": { 
\"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\":", "\"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "11.34, 10.05])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]),", "def test_compete_res_dist(self): \"\"\"Test outcomes given valid sample measures w/ some array inputs.\"\"\" #", "{ \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"cost\": {", "{ \"2009\": { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95, \"rate", "numpy.array([ 26.04455, 27.29736, 20.29000])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]),", "(total)\": {\"2009\": 10, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 10, \"2010\": 15}}, \"carbon\":", "\"2010\": 0}}, \"original energy (competed and captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}},", "test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test output given residential", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 8.5, \"2010\": numpy.array([6.0,", "numpy.array([ 2.59768671, 0.02713253, 14.40498233]), \"2010\": numpy.array([ 2.59768671, 0.02713253, 14.40498233])}, \"efficient\": { \"2009\": numpy.array([", "given residential measure with point value inputs.\"\"\" # Initialize test measure and assign", "function on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test", "-2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc (w/", "7.801544])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.255, 
0.1350000, 0.2050000, 0.21, 0.2750000]), \"2010\":", "enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test outcomes given valid sample measures", "[20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": { \"2009\":", "5], \"nested key 2\": 5}, \"key 2\": 10.8}, \"Max adoption potential\": { \"key", "secondary market microsegment key chain being tested. secnd_adj_key (string): Key used to link", "20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([10, 12, 14])}}, \"competed\": {", "self.ok_master_mseg_dist4 # Create Engine instance using test measure, run function on it engine_instance", "cost input values instead of point values. compete_meas4 (dict): Sample residential supply-side cooling", "= run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using test", "that are found in i and i2, # respectively, at the current level", "{ \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 15}},", "\"rate 6\": 110, \"rate 7\": 115}}}, \"energy cost\": { \"residential\": { \"2009\": None,", "\"contributing mseg keys and values\": {}, \"competed choice parameters\": {}, \"secondary mseg adjustments\":", "subset of 'measures_all_dist'. measures_supply_dist (list): Supply-side subset of 'measures_all_dist'. 
measures_overlap1_dist (dict): List of", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": 69, \"2010\": numpy.array([66, 66, 63])}, \"efficient\":", "functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.handyvars.retro_rate", "numpy.array([ numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091),", "yr: 5 for yr in cls.handyvars.aeo_years}}, }} cls.compete_meas1 = { \"name\": \"sample compete", "\"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\":", "\"2010\": 8}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 8}, \"efficient\": {\"2009\": 10, \"2010\":", "Test that the dicts from the current keys are equal self.assertCountEqual(i, i2) #", "{ \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7,", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([17.77, 10.23,", "\"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -170}}}}, { \"stock cost\": {", "Sample commercial measure data. test_adopt_scheme (string): Sample consumer adoption scheme. 
ok_rate (float): Sample", "0.2242152, 0.2659574, 0.2857143]), \"2010\": numpy.array([ 0.3344482, 0.3194888, 0.3533569, 0.3472222, 0.3636364])}, \"payback (w/ energy", "4.442382, 8.824726, 5.647891, 5.501689, 4.082098]), \"2010\": numpy.array([ 8.446248, 11.795815, 6.327488, 10.343948, 7.801544])}, \"payback", "\"residential\": { \"2009\": -100, \"2010\": -100}, \"commercial\": { \"2009\": None, \"2010\": None}}}] #", "{\"2009\": 25, \"2010\": 25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": 30, \"2010\":", "{\"2009\": 45, \"2010\": 45}}}} def test_ok(self): \"\"\"Test for correct function output given valid", "scheme. test_htcl_adj (dict): Sample dict with supply-demand overlap data. adjust_key1 (string): First sample", "20}, \"measure\": { \"2009\": numpy.array([16.04, 17.30, 10.29]), \"2010\": numpy.array([16.04, 17.30, 10.29])}}, \"competed\": {", "\"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None,", "1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0.432947785,", "0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.670251, 7.816181, 0.01637724]),", "0.5567503, 2.931068, 0.006743571])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 3.340502, 14.65534,", "# consumer metrics consumer_metrics_final = [{ \"stock cost\": { \"residential\": { \"2009\": 95,", "{\"2009\": 30, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\":", "array test measure consumer # metrics consumer_metrics = [{ \"stock cost\": { \"residential\":", "\"rate 4\": -60, \"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75}, \"2010\":", "captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}, \"Max adoption 
potential\":", "numpy.array([ 0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\":", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -40, \"rate 2\": -50,", "metrics consumer_metrics_final = [{ \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95},", ".40}, \"Cooling\": {\"2009\": .45, \"2010\": .45}}}} cls.ok_out = { \"AIA CZ1\": { \"Residential\":", "\"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([", "\"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\":", "{ \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5},", "(grid)\"], \"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\":", "numpy.array([ 1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])}, \"irr (w/ energy and carbon costs)\": {\"2009\":", "15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}, \"competed\": { \"baseline\": {\"2009\": 10,", "-0.125), \"rate 3\": numpy.pmt(0.45, 2, 0.01724138), \"rate 4\": numpy.pmt(0.25, 2, 0.1), \"rate 5\":", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20,", "2, 0.2009346)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"energy", "\"\"\"Test for correct function output given valid inputs.\"\"\" dict1 = self.a_run.out_break_walk( self.ok_partitions, self.ok_total)", "2\": 2}, \"key 2\": 5.8}}} def test_numpy_convert(self): \"\"\"Test for correct function output given", "\"2010\": 6.511136}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 16.04455, \"2010\": 16.04455},", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, 
\"efficient\": {", "25, \"2010\": 25}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1,", "captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure5 = { \"name\": \"sample measure 5 (commercial)\",", "10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30,", "18.8, 17.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\":", "measure results data. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all", "(annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": {\"2009\": -0.01602415, \"2010\": -0.01111353}, \"cce (w/", "will use the fill value created below as a # substitute in the", "chain being tested. adjust_key2 (string): Second sample string for competed demand-side and supply-side", "\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\":", "{ \"rate 1\": numpy.pmt(10.0, 2, 0.09917355), \"rate 2\": numpy.pmt(1.0, 2, 0.75), \"rate 3\":", "{\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40},", "2\": -50, \"rate 3\": -55, \"rate 4\": -60, \"rate 5\": -65, \"rate 6\":", "{ \"2009\": numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08,", "\"2009\": numpy.array([ 17.77300, 10.22977, 19.98073]), \"2010\": numpy.array([ 17.77300, 10.22977, 19.98073])}, \"efficient\": { \"2009\":", "\"efficient\": {\"2009\": 50, \"2010\": 100}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "\"2010\": 200}, \"savings (annual)\": {\"2009\": 50, \"2010\": 50}, \"cost savings (total)\": {\"2009\": 5,", "10, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\":", "\"2010\": 20}, \"measure\": {\"2009\": 17, \"2010\": 12}}, 
\"competed\": { \"all\": {\"2009\": 10, \"2010\":", "30, \"2010\": 30}, \"measure\": { \"2009\": numpy.array([22.22, 22.68, 20.11]), \"2010\": numpy.array([22.22, 22.68, 20.11])}},", "0.2233333, 0.1400000, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.2040000, 0.10800000,", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\":", "code to be tested import run # Import needed packages import unittest import", "15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([", "else: self.assertAlmostEqual(i, i2, places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure attributes are correctly initiated.", "run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict = measure_instance.__dict__ def test_attributes(self): \"\"\"Compare object attributes to keys from", "numpy.array([1.11, 4.89, 0.01])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226,", "\"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}, \"competed\": { \"baseline\":", "class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,' and 'htcl_adj'. 
Verify that 'compete_res_primary' correctly calculates primary", "test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify test measure consumer-level", "44.9, 45.0, 43.9])}, \"cost savings (total)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]),", "\"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07,", "numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07,", "measure c3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\":", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24},", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}},", "cost benefits)\": { \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": {", "traverse the dict self.dict_check(i, i2) # At the terminal/leaf node, formatted as a", "\"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "\"2010\": numpy.array([6, 5, 3])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "(grid)', 'lighting', 'reflector (LED)')): { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments for each sample measure #", "18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.test_adopt_scheme", "0.01724138), 
\"rate 4\": numpy.pmt(0.25, 2, 0.1), \"rate 5\": numpy.pmt(0.15, 2, 0.1521739), \"rate 6\":", "numpy.repeat(None, 5) }}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.8859289),", "and demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) #", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 40, \"2010\":", "\"baseline\": { \"2009\": numpy.array([ 22.22366, 22.68455, 20.10668]), \"2010\": numpy.array([ 22.22366, 22.68455, 20.10668])}, \"efficient\":", "5) }}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07,", "2, 1.798978), numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07, 2,", "value is given as a tuple to be of comparable structure # to", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -190, \"rate 2\": -195,", "run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[", "\"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75}, \"2010\": { \"rate 1\":", "\"rate 1\": 205, \"rate 2\": 100, \"rate 3\": 105, \"rate 4\": 110, \"rate", "all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure", "'measures_primary_dist' objects. 
measures_overlap (dict): List of supply-side Measure objects and associated contributing microsegment", "scaling\": 1}, str(('primary', 'AIA_CZ2', 'multi family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): {", "'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',", "2, 0.04958678), \"rate 2\": numpy.pmt(1.0, 2, 0.375), \"rate 3\": numpy.pmt(0.45, 2, 0.5826397), \"rate", "35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7,", "5}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {", "adoption potential\" cls.adjust_key1 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling',", "8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}, \"efficient\": { \"2009\": numpy.array([0, 0,", "{ \"baseline\": {\"2009\": 16.04455, \"2010\": 16.04455}, \"efficient\": {\"2009\": 8.022273, \"2010\": 8.022273}}, \"competed\": {", "\"\"\"Compare object attributes to keys from input dict.\"\"\" for key in self.sample_measure.keys(): self.assertEqual(", "{\"2009\": 8.5, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "\"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}}, \"lifetime\":", "50, \"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 0,", "(competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.compete_meas5 = { \"name\": \"sample compete measure", "\"measure\": {\"2009\": 1.73, \"2010\": 1.73}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\":", "test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify test measure portfolio-level 
financial metrics", "\"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([", "\"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 40, \"2010\": 30}}, \"competed\":", "-4.976366e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08,", "{ \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"carbon\": {", "1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector", "16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": { \"2009\": 10,", "{ \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": numpy.array([22.22, 22.68, 20.11]), \"2010\":", "{\"2009\": 11.5, \"2010\": 11}}, \"competed\": { \"baseline\": {\"2009\": 11.5, \"2010\": 11}, \"efficient\": {\"2009\":", "energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\":", "captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key2: { \"2009\": 0, \"2010\": 0}},", "10.67114, 10.02667])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 63.33550, 64.02682, 60.16002]),", "{\"2009\": 0, \"2010\": 12}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"carbon\": { \"total\": {", "\"rate 4\": numpy.pmt(0.25, 2, 0.3), \"rate 5\": numpy.pmt(0.15, 2, 0.3695652), \"rate 6\": numpy.pmt(0.065,", "\"rate 6\": 110, \"rate 7\": 115}, \"2010\": { \"rate 1\": 85, \"rate 2\":", "26.04455, 27.29736, 20.29000])}, \"efficient\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341,", "\"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}, \"cost savings 
(annual)\":", "None, \"2010\": numpy.array([ { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95,", "{}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3_dist = {", "0.9345794), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 5, 4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "5\": -390, \"rate 6\": -150, \"rate 7\": -400}}}, \"carbon cost\": { \"residential\": {", "this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 40,", "to recursively traverse the dict self.dict_check(i, i2) # At the terminal/leaf node, formatted", "13.02227, \"2010\": 13.02227}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"carbon\": { \"total\": { \"baseline\":", "\"efficient\": {\"2009\": 30, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "{ \"savings\": { cls.adjust_key2: { \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key2: {", "18.8, 17.5])}}}, { \"cce\": { \"2009\": numpy.array([ -0.01306317, -0.01389378, -0.01422262, -0.01238981, -0.01613170]), \"2010\":", "\"measure\": 1}, \"sub-market scaling\": 1}, cls.overlap_key_scnd: { \"stock\": { \"total\": { \"all\": {\"2009\":", "\"measure\": {\"2009\": 17, \"2010\": 12}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "None, \"2010\": None }, \"commercial\": { \"2009\": None, \"2010\": numpy.array([ { \"rate 1\":", "20}, \"efficient\": {\"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10},", "5, \"2010\": 15}}}, { \"cce\": {\"2009\": -0.01602415, \"2010\": -0.01111353}, \"cce (w/ carbon cost", "sample measure. 
ok_out_dist4 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 1.73, \"2010\": 1.73}}, \"competed\": { \"all\": {\"2009\":", "numpy.pmt(0.07, 2, 1.808018), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"carbon", "{}}}} cls.compete_meas1_dist = { \"name\": \"sample compete measure r1 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\":", "\"energy cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate", "{\"2009\": 100, \"2010\": 100}, \"cost savings (total)\": {\"2009\": 10, \"2010\": 15}, \"cost savings", "financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics,", "10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"cost\": { \"stock\": { \"total\": {", "= -1 cls.ok_esave = 7.5 cls.ok_ecostsave = 0.5 cls.ok_csave = 50 cls.ok_ccostsave =", "150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 50, \"2010\": 50}, \"cost savings (total)\": {\"2009\":", "5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([", "objects with point value inputs. measures_demand (list): Demand-side subset of 'measures_all'. 
measures_supply (list):", "{\"2009\": 10, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\":", "6.511136}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 39.06682, \"2010\": 39.06682}, \"efficient\": {\"2009\": 26.04455,", "{\"2009\": 11.5, \"2010\": 11}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 69, \"2010\": 66},", "21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\": { \"total\": {", "-0.01262901])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.0396936, -0.04452961, -0.05150073, -0.006204243,", "15.21750])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}},", "with a residential sample measure. ok_out_dist4 (dict): Measure attribute update status, savings, and", "10}, \"measure\": {\"2009\": 10, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40,", "of point values. compete_meas2 (dict): Sample residential demand-side cooling measure 2. compete_meas3 (dict):", "on sample demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps", "measure data. sample_measure_com (object): Sample commercial measure data. 
test_adopt_scheme (string): Sample consumer adoption", "40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40},", "{\"2009\": 8.886499, \"2010\": 8.886499}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "energy (competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key2: { \"2009\":", "\"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"general", "generated for each Measure object in 'measures_all_dist' following competition and supply-demand overlap adjustments.", "40, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10,", "instance using test measure, run function on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics(", "overlaps across the supply and demand sides of # heating and cooling self.a_run.htcl_adj(", "\"residential\": { \"2009\": None, \"2010\": None }, \"commercial\": { \"2009\": None, \"2010\": numpy.array([", "\"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251, \"2010\": 1.670251}}, \"competed\":", "5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 15,", "0.05350000, -0.01111353, -0.01111353, -0.04976366])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.002333333,", "\"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\":", "{ \"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10,", "[\"general service (CFL)\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\":", "\"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 10}}, \"competed\":", 
"residential sample measure. ok_out_dist1 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial", "6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\":", "10, \"2010\": 10}, \"measure\": {\"2009\": 1.73, \"2010\": 1.73}}, \"competed\": { \"all\": {\"2009\": 5,", "\"measure\": { \"2009\": numpy.array([0.87, 0.01, 4.80]), \"2010\": numpy.array([0.87, 0.01, 4.80])}}}, \"energy\": { \"total\":", "\"2010\": numpy.repeat(None, 5) }}}, \"irr (w/ energy costs)\": { \"2009\": numpy.array([ 3.648926, 3.737086,", "\"2010\": 44}, \"efficient\": {\"2009\": 34.5, \"2010\": 33}}, \"competed\": { \"baseline\": {\"2009\": 23, \"2010\":", "\"master_mseg\"] = self.ok_master_mseg_dist3 # Create Engine instance using test measure, run function on", "parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\":", "= run.Engine(self.handyvars, self.measure_list) # Record the output for the test run of the", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": 51, \"2010\": numpy.array([36, 39, 48])}, \"efficient\":", "{ \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.4345794), \"2010\": numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\": {\"2009\":", "20, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": {", "\"cce (w/ carbon cost benefits)\": { \"2009\": -0.04935749, \"2010\": -0.08611353}, \"ccc\": {\"2009\": -1.602415e-08,", "\"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": 30, \"2010\": 30}, \"Cooling\": {\"2009\": 35,", "7\": -0.75}}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr (w/", "competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class", "being tested. 
overlap_key_scnd (string): Second sample string for secondary market microsegment key chain", "(dict): Sample residential demand-side cooling measure 2. compete_meas3 (dict): Sample residential supply-side cooling", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\":", "120, \"rate 4\": 130, \"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160},", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\":", "\"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "keys and values\": { cls.adjust_key2: { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "[5, 6, 7])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market", "{\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist1 = { \"stock\": { \"total\": {", "None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4090909), \"rate 2\": numpy.pmt(1.0,", "7\": -200}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", "{\"2009\": 40, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\":", "overlap adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_res_dist(self):", "{ \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array(", "\"Residential\": { \"Heating\": {\"2009\": .30, \"2010\": .30}, \"Cooling\": {\"2009\": .35, \"2010\": .35}}, \"Commercial\":", "\"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.113501, \"2010\": 1.113501}}, \"competed\":", "\"efficient\": { \"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", 
"(include all lines below this point in all # test files) def main():", "costs)\": {\"2009\": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]), \"2010\": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])},", "#1. sample_measure2 (dict): Sample residential measure #2. sample_measure3 (dict): Sample commercial measure #1.", "of competing residential measures; and that 'htcl_adj' properly accounts for heating and cooling", "self.assertCountEqual(i, i2) # Continue to recursively traverse the dict self.dict_check(i, i2) # At", "('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing')) cls.adjust_key2 =", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\":", "\"efficient\": { \"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\":", "output given residential measure with point value inputs.\"\"\" # Initialize test measure and", "\"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}},", "{ \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 30, \"2010\": 30}},", "\"2010\": { \"rate 1\": 100, \"rate 2\": 110, \"rate 3\": 120, \"rate 4\":", "10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg", "engine object based on above measures cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information", "-190, \"rate 4\": -205, \"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200}}},", "\"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist = { \"name\": \"sample compete measure c2 dist\",", "energy costs)\": {\"2009\": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]), \"2010\": numpy.array([0.50, 0.50, 2.44, 2.44,", "25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": 30, \"2010\": 30}, \"Cooling\": {\"2009\":", "= [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 
10}, \"measure\": {", "\"2009\": 0, \"2010\": numpy.array([16, 15, 13])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10},", "150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 100, \"2010\": 100}, \"cost savings (total)\": {\"2009\":", "7\": 115}, \"2010\": { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95,", "home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"windows\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None},", "prioritization metric outputs. Attributes: handyvars (object): Useful variables across the class. measure_list (list):", "5\": numpy.pmt(0.15, 2, 0.8128544), \"rate 6\": numpy.pmt(0.065, 2, 0.9103132), \"rate 7\": -0.5}, \"2010\":", "numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\": { \"2009\": numpy.repeat(None,", "1.36547), \"rate 7\": -0.75}}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44},", "0.33])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]),", "{ \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([36, 30, 18])}, \"efficient\": {", "values for idx, cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test", "cost\": { \"residential\": { \"2009\": None, \"2010\": None }, \"commercial\": { \"2009\": None,", "\"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\":", "the test run of the 'metric_update' # function function_output = engine_instance.metric_update( self.measure_list[0], self.ok_base_life,", "0.02119408])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.670251, 7.32767, 0.01445051]), \"2010\": numpy.array([ 1.670251,", "\"competed\": { \"all\": {\"2009\": 5, \"2010\": 10}, \"measure\": {\"2009\": 5, \"2010\": 10}}}, 
\"energy\":", "{ \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}},", "numpy.array([ 3.648926, 3.737086, 3.956335, 3.180956, 2.886001]), \"2010\": numpy.array([ 2.425032, 2.584709, 2.240438, 2.298386, 2.147181])},", "4.543007]), \"2010\": numpy.array([ 4.882353, 7.108108, 6.327488, 10.343948, 8.181351])}, \"payback (w/ energy costs)\": {\"2009\":", "-0.04935749, -0.0802776]), \"2010\": numpy.array([ -0.021500000, -0.021500000, -0.08611353, -0.08611353, -0.1247637])}, \"ccc\": { \"2009\": numpy.array([", "'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP',", "\"irr (w/ energy costs)\": { \"2009\": numpy.array([ 3.648926, 3.737086, 3.956335, 3.180956, 2.886001]), \"2010\":", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 60, \"2010\":", "-8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08, -8.216772e-08, -7.592937e-08])}}, {", "\"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]), \"2010\":", "\"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\":", "0.1233333, 0.2233333, 0.1400000, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.2040000,", "-50, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\": {", "\"adjusted energy (competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key1: {", "\"2009\": 69, \"2010\": numpy.array([66, 66, 63])}, \"efficient\": { \"2009\": 46, \"2010\": numpy.array([44, 44,", "values\": { cls.adjust_key2: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10},", "-0.09523954, -0.10215319, -0.13025120])}, \"ccc\": { 
\"2009\": numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\":", "cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max", "\"2010\": 30}, \"measure\": {\"2009\": 22.22, \"2010\": 22.22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\":", "30.08001])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}},", "\"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.overlap_key:", "sample measure # following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check(", "\"2010\": -200}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": {", "11.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775,", "it a sample 'uncompeted' # market ('ok_master_mseg_dist3'), the focus of this test suite", "\"master_mseg\"] = self.ok_master_mseg_dist1 # Create Engine instance using test measure, run function on", "\"2010\": numpy.repeat(None, 5) }}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2,", "{ \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 20, \"2010\": 20}},", "{\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "(LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": \"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"],", "0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "benefits)\": { \"2009\": numpy.array([ 0.003046667, -0.01407333, -0.05267604, -0.05230731, -0.07946463]), 
\"2010\": numpy.array([ -0.047715000, -0.05520500,", "consumer_metrics = [{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "numpy.array([ 1.113501, 4.885113, 0.009633673])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]),", "\"commercial\": { \"2009\": { \"rate 1\": -135, \"rate 2\": -140, \"rate 3\": -145,", "self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary' and 'secondary_adj' functions. Verify that", "{ \"rate 1\": 205, \"rate 2\": 100, \"rate 3\": 105, \"rate 4\": 110,", "accounts for heating and cooling supply-demand overlaps. Attributes: handyvars (object): Useful variables across", "the dicts # is empty, is missing section(s), or has different key names", "class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = { \"market_entry_year\":", "\"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"cost\": { \"stock\": { \"total\": {", "of 'measures_all'. measures_overlap1 (dict): List of supply-side Measure objects and associated contributing microsegment", "overlap with 'measures_demand' Measure objects. measure_master_msegs_out (dict): Master market microsegments that should be", "(annual)\": { \"2009\": numpy.array([94, 93, 99, 84, 99]), \"2010\": numpy.array([114, 105, 89, 145,", "fraction. ok_out (dict): Sample partitioned measure results data. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define", "sample_measure (object): Residential sample measure object. 
attribute_dict (dict): Dict of sample measure attributes.", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"carbon\": { \"total\":", "competition and supply-demand overlap adjustments. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use", "supply-demand overlaps. Attributes: handyvars (object): Useful variables across the class. test_adopt_scheme (string): Sample", "\"efficient\": { \"2009\": numpy.array([6, 7, 1, 16, 1]), \"2010\": numpy.array([36, 45, 61, 5,", "{ \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}, \"efficient\": {", "numpy.array([ 3.340502, 14.65534, 0.02890102])}, \"efficient\": { \"2009\": numpy.array([ 2.227001, 10.25874, 0.02119408]), \"2010\": numpy.array([", "numpy.array([8.0, 7.5, 6.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24,", "\"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, cls.overlap_key_scnd: { \"stock\": { \"total\": {", "savings (annual)\": {\"2009\": 10, \"2010\": 15}}, \"carbon\": { \"savings (total)\": {\"2009\": 150, \"2010\":", "\"rate 5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}},", "sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) # Run the", "CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars, **sample_measure)] cls.a_run = run.Engine(handyvars, measure_list) cls.ok_total = {\"2009\": 100,", "dict1, dict2): \"\"\"Check the equality of two dicts. 
Args: dict1 (dict): First dictionary", "numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}, \"cost savings", "{\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": {\"2009\":", "self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize", "ok_base_life (int): Sample baseline technology lifetime. ok_product_lifetime (float): Sample measure lifetime. ok_life_ratio (int):", "the tuple is the key and the second item is the value; #", "\"2010\": 8.02}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\":", "numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"cost\": {", "54])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\":", "\"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None,", "and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist3' with a residential", "= \"Max adoption potential\" cls.adjust_key1 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity", "10}, \"measure\": { \"2009\": numpy.array([8.02, 8.65, 5.14]), \"2010\": numpy.array([8.02, 8.65, 5.14])}}}, \"energy\": {", "\"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array( [0,", "{\"2009\": 11.11183, \"2010\": 11.11183}}, \"competed\": { \"baseline\": {\"2009\": 11.11183, \"2010\": 11.11183}, \"efficient\": {\"2009\":", "self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,' and 'htcl_adj'. 
Verify that 'compete_res_primary'", "\"Cooling\": {\"2009\": 25, \"2010\": 25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": 30,", "\"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": \"supply\"},", "numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "\"2010\": 15}, \"measure\": {\"2009\": 11.11, \"2010\": 11.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "\"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5,", "cost benefits)\": { \"2009\": numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08,", "{\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.036380, 0.019260, -0.01934271, -0.01897398,", "self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, self.ok_csave, self.ok_ccostsave) # Test that valid inputs", "\"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -90,", "{ \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\": { \"total\": { \"baseline\": {", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}}, \"energy\": { \"total\": {", "\"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\":", "numpy.array([ 20.82975, 15.17233, 22.48555])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.88650, 10.11489, 14.99037]),", "cost. ok_scostsave (int): Sample baseline->measure stock cost delta. 
ok_esave (int): Sample measure energy", "numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\":", "8.446248, 11.795815, 6.327488, 10.343948, 7.801544])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.255, 0.1350000,", "\"2010\": numpy.array([ 22.22366, 22.68455, 20.10668])}, \"efficient\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\":", "\"2010\": 5}, \"efficient\": { \"2009\": numpy.array([0, 1, 2]), \"2010\": numpy.array([0, 1, 2])}}}, \"energy\":", "the value; # in the case where the dicts are not of identical", "market microsegment key chain being tested. adjust_key2 (string): Second sample string for competed", "\"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"cost\": { \"stock\": { \"total\": {", "\"2010\": 15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\":", "metrics for adopt_scheme in self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted(", "= [\"2009\", \"2010\"] cls.handyvars.retro_rate = 0 cls.test_adopt_scheme = \"Max adoption potential\" cls.adjust_key1 =", "{ \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}},", "including stock cost and measure lifetime array. 
ok_out_point_res (dict): Measure attribute update status,", "a numpy array # (for input uncertainty test cases) elif isinstance(i, numpy.ndarray): self.assertTrue(type(i)", "0, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\":", "\"competed\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\":", "{ \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"cost\": {", "42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}}, \"competed\": { \"baseline\": { \"2009\":", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07,", "and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure", "\"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\":", "\"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 2.59768671, \"2010\":", "self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on sample supply-side measures", "2.7285e-08, 1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\":", "11.5, \"2010\": 11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\":", "{\"2009\": 34, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\":", "5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\":", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 85, 
\"rate 2\": 90,", "cls.compete_meas4 = { \"name\": \"sample compete measure r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family", "4.082098]), \"2010\": numpy.array([ 8.446248, 11.795815, 6.327488, 10.343948, 7.801544])}, \"payback (w/ energy costs)\": {\"2009\":", "[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2 =", "0.25, 0.25]), \"2010\": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])}, \"payback (w/ energy and carbon", "secondary microsegments to adjust. a_run_dist (object): Analysis engine object incorporating all 'measures_primary_dist' objects.", "each sample measure # following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run_dist.measures):", "{ \"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30,", "\"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20,", "\"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\":", "22.68455, 20.10668]), \"2010\": numpy.array([ 22.22366, 22.68455, 20.10668])}, \"efficient\": { \"2009\": numpy.array([ 11.11183, 11.34227,", "family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"],", "0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]),", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([16,", "stock cost. ok_scostsave (int): Sample baseline->measure stock cost delta. 
ok_esave (int): Sample measure", "cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1 = { \"name\": \"sample compete measure c1\",", "1, 0.4672897), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 5, 2.050099)]), \"2010\": numpy.array([", "of stock cost input values instead of point values. compete_meas4 (dict): Sample residential", "in 'measures_all_dist' following competition and supply-demand overlap adjustments. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define", "\"2010\": 40}, \"Cooling\": {\"2009\": 45, \"2010\": 45}}}} def test_ok(self): \"\"\"Test for correct function", "adjustments\": { \"market share\": { \"original energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0,", "9.77, 0.02])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([1.11,", "\"irr (w/ energy costs)\": {\"2009\": numpy.array([ 0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]), \"2010\": numpy.array([", "\"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.232209e-08, -9.117156e-08, -8.600937e-08, -8.564064e-08, -8.084718e-08]),", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 16.04, \"2010\": 16.04}}, \"competed\":", "copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1 = { \"measures\": cls.measures_all[2:5], \"keys\":", "\"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}, cls.overlap_key_scnd: { \"rate distribution\": {}}}, \"secondary", "generated given 'ok_master_mseg_dist2' with a residential sample measure. 
ok_out_dist3 (dict): Measure attribute update", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 2, 1.808018),", "10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": 5}}, \"competed\": { \"baseline\": {", "{\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}, \"cost\": { \"stock\": {", "\"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}, \"competed\":", "\"2010\": numpy.array([ 41.65950, 30.34466, 44.97110])}, \"efficient\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\":", "\"rate 3\": -145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate", "case of a dict, the first item # in the tuple is the", "\"name\": \"sample compete measure c2 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\":", "measure competition routine on sample measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme) # Run secondary", "\"cce\": { \"2009\": numpy.array([ 0.03566667, 0.03566667, -0.01602415, -0.01602415, -0.04694426]), \"2010\": numpy.array([ 0.05350000, 0.05350000,", "function. 
Verify that function properly applies a climate zone/building type/end use partition to", "{ \"2009\": numpy.array( [15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": {", "= os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)]", "\"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 60, \"2010\": 40}}, \"competed\":", "numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\": {\"baseline\":", "self.assertTrue( all([isinstance(x, y) for x, y in zip([ tested_data[\"key 1\"][\"nested key 1\"], tested_data[\"key", "\"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}},", "2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 5, 2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.255),", "30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "7]), \"2010\": numpy.array([5, 6, 7])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2 = { \"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single", "{ \"2009\": numpy.array([11.11, 11.34, 10.05]), \"2010\": numpy.array([11.11, 11.34, 10.05])}}}, \"energy\": { \"total\": {", "\"commercial\": {\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2,", "key chain being tested. 
overlap_key_scnd (string): Second sample string for secondary market microsegment", "[\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\", \"cooling\"]}, \"technology\":", "14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}}, \"energy\": { \"total\":", "Second dictionary to be compared Raises: AssertionError: If dictionaries are not equal. \"\"\"", "\"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\", \"cooling\"]},", "{ \"2009\": 25.5, \"2010\": numpy.array([18, 19.5, 24])}}, \"competed\": { \"baseline\": { \"2009\": 17,", "5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}, \"cost savings (annual)\":", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\": {", "\"2010\": 60}, \"efficient\": {\"2009\": 60, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\":", "-150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370}}}, \"carbon cost\": {", "5.8}}} def test_numpy_convert(self): \"\"\"Test for correct function output given valid input.\"\"\" # Instantiate", "5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}}, \"irr", "-0.09855809])}, \"ccc\": { \"2009\": numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08,", "1.346974), numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07, 2, 1.130011)])", "\"2010\": 5}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing", "self.handyvars.adopt_schemes: for comp_scheme in [\"uncompeted\", \"competed\"]: tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme] 
self.assertTrue( all([isinstance(x, y)", "6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\":", "of competing measures including some measures with array inputs. measures_secondary_dist (list): Subset of", "supply-side cooling measure 1. compete_meas3_dist (dict): Alternative version of sample residential supply-side cooling", "('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'lighting', 'reflector (LED)', 'existing')) cls.overlap_key_scnd = str( ('secondary',", "carbon costs)\": {\"2009\": numpy.array([ 4.442382, 8.824726, 5.647891, 5.501689, 4.082098]), \"2010\": numpy.array([ 8.446248, 11.795815,", "residential supply-side cooling measure 2. compete_meas5 (dict): Sample residential supply-side cooling measure 3.", "-0.09966428, -0.10353592, -0.09523954, -0.10215319, -0.09855809])}, \"ccc\": { \"2009\": numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08,", "\"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 20, \"2010\":", "40, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20,", "\"baseline\": { \"2009\": numpy.array([ 63.33550, 64.02682, 60.16002]), \"2010\": numpy.array([ 63.33550, 64.02682, 60.16002])}, \"efficient\":", "numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\":", "\"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\":", "{ \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 5}}}, \"carbon\": {", "cls.overlap_key_scnd: { \"rate distribution\": {}}}, \"secondary mseg adjustments\": { \"market share\": { \"original", "needed to finalize array test measure consumer # metrics consumer_metrics = [{ \"stock", "5\": 90, \"rate 6\": 100, \"rate 7\": 110}}}, \"energy cost\": { \"residential\": {", "\"\"\"Test outcomes given 
valid sample measures w/ some array inputs.\"\"\" # Run measure", "{\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\": 50, \"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\":", "{ \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "0}}, \"adjusted energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}},", "captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas1_dist =", "[\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\":", "\"\"\"Class of common sample measures for tests. Attributes: sample_measure (dict): Sample residential measure", "0.1896552), \"rate 4\": numpy.pmt(0.25, 2, 0.3), \"rate 5\": numpy.pmt(0.15, 2, 0.3695652), \"rate 6\":", "for heating and cooling supply-demand overlaps. Attributes: handyvars (object): Useful variables across the", "-150}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\":", "demand-side cooling measure 2. compete_meas3 (dict): Sample residential supply-side cooling measure 1. compete_meas3_dist", "\"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "\"2010\": 20}, \"measure\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}}, \"competed\": { \"all\":", "{ \"baseline\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}, \"efficient\": { \"2009\": 0,", "generated given 'ok_master_mseg_dist3' with a residential sample measure. ok_out_dist4 (dict): Measure attribute update", "Sample residential supply-side cooling measure 3. 
measures_all (list): List of all competing/interacting sample", "\"mseg_out_break\": {}}}} cls.compete_meas5 = { \"name\": \"sample compete measure r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "15, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\":", "(competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure3 = { \"name\": \"sample measure", "5, \"2010\": 5}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": {", "{ \"primary\": \"supply\", \"secondary\": \"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"], \"markets\": {", "\"carbon\": { \"savings (total)\": { \"2009\": numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]), \"2010\": numpy.array([199.4,", "[[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))], [str(('primary', 'AIA_CZ1',", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.113501,", "11.5, \"2010\": numpy.array([11, 11, 10.5])}}, \"competed\": { \"baseline\": { \"2009\": 11.5, \"2010\": numpy.array([11.0,", "1 cls.ok_out_array = [ numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654),", "\"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\":", "\"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\":", "15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}}, \"lifetime\":", "2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": {", "[test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status 
self.dict_check(engine_instance.measures[ 0].update_results,", "31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\":", "'metric_update' # function function_output = engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave,", "30, \"2010\": 30}, \"measure\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}}, \"competed\": {", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\":", "3\": 115, \"rate 4\": 120, \"rate 5\": 125, \"rate 6\": 10, \"rate 7\":", "\"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"],", "secondary markets associated with these primary market microsegments. Attributes: handyvars (object): Useful variables", "0.01085301, 6.722325])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([", "(LED)')): { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "\"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_point_com = [{ \"savings and portfolio metrics\": { \"Technical", "110, \"rate 7\": 115}, { \"rate 1\": 205, \"rate 2\": 100, \"rate 3\":", "the measure competition routine on sample supply-side measures self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2, self.test_adopt_scheme) #", "energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}}", "{\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": None}, \"markets\": { \"Technical potential\":", "120, \"2010\": 120}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\":", "compete measure r2\", \"climate_zone\": 
[\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\":", "\"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'multi family home', 'electricity (grid)',", "\"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\":", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}, \"competed\": {", "0.009633673])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": {", "8.886499, \"2010\": 8.886499}}, \"competed\": { \"baseline\": {\"2009\": 8.886499, \"2010\": 8.886499}, \"efficient\": {\"2009\": 0,", "31.66775, \"2010\": 31.66775}}, \"competed\": { \"baseline\": {\"2009\": 21.11183, \"2010\": 21.11183}, \"efficient\": {\"2009\": 10.55592,", "incorporating all 'measures_primary' objects. measures_all_dist (list): List of competing measures including some measures", "Attributes: sample_measure (object): Residential sample measure object. attribute_dict (dict): Dict of sample measure", "Supply-side subset of 'measures_all'. measures_overlap1 (dict): List of supply-side Measure objects and associated", "60}, \"efficient\": {\"2009\": 40, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30},", "#1. 
\"\"\" def __init__(self): self.sample_measure = { \"name\": \"sample measure 1\", \"active\": 1,", "{ \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"cost\": { \"stock\": { \"total\": {", "running the engine \"\"\" # Import code to be tested import run #", "20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}},", "{\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.11, \"2010\": 11.11}}}, \"energy\": { \"total\": {", "captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}, \"Max adoption", "# market ('ok_master_mseg_dist1'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res)", "60, \"2010\": 60}, \"efficient\": {\"2009\": 60, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30,", "microsegment key chain being tested. overlap_key_scnd (string): Second sample string for secondary market", "42.22366}, \"efficient\": {\"2009\": 31.66775, \"2010\": 31.66775}}, \"competed\": { \"baseline\": {\"2009\": 21.11183, \"2010\": 21.11183},", "numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"carbon\": { \"total\": {", "numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 5, 2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07, 1,", "\"2010\": 0.432947785}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\":", "-100]), \"2010\": numpy.array([-50, -100, -10])}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock", "\"stock\": { \"cost savings (total)\": {\"2009\": -5, \"2010\": -10}, \"cost savings (annual)\": {\"2009\":", "5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20},", "\"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}}, \"energy\":", "\"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370}, 
\"2010\": { \"rate 1\":", "consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4),", "At the terminal/leaf node, formatted as a point value else: self.assertAlmostEqual(i, i2, places=2)", "including one sample residential measure. ok_num_units (int): Sample number of competed units. ok_base_life", "measure_list (list): List for Engine including one sample residential measure. ok_num_units (int): Sample", "\"total\": { \"baseline\": {\"2009\": 17.77300, \"2010\": 17.77300}, \"efficient\": {\"2009\": 8.886499, \"2010\": 8.886499}}, \"competed\":", "0.009633673])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}},", "{ \"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}}, \"lifetime\": {\"baseline\":", "str(('primary', 'AIA_CZ2', 'multi family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\": {", "'secondary_adj' functions. Verify that 'compete_com_primary' correctly calculates primary market shares and updates master", "8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}}, \"competed\": { \"baseline\": { \"2009\":", "numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\":", "competing measures including some measures with array inputs. measures_secondary_dist (list): Subset of 'measures_all_dist'", "Raises: AssertionError: If dictionaries are not equal. 
\"\"\" # zip() and zip_longest() produce", "5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.03566667, 0.03566667, -0.01602415, -0.01602415, -0.04694426]),", "at the current level of the recursive # exploration of dict1 and dict2,", "captured)\": {}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}},", "\"2010\": .45}}}} cls.ok_out = { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": 10,", "numpy.array([6.0, 6.5, 8.0])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": {", "None}, \"commercial\": { \"2009\": { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\":", "cost benefits)\": { \"2009\": numpy.array([ 0.003046667, -0.01407333, -0.05267604, -0.05230731, -0.07946463]), \"2010\": numpy.array([ -0.047715000,", "\"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.09917355), \"rate 2\": numpy.pmt(1.0, 2,", "numpy.array([ numpy.pmt(0.07, 2, 1.346974), numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07, 2, 1.247533),", "1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]),", "0.8739596), \"rate 4\": numpy.pmt(0.25, 2, 1.08), \"rate 5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\":", "{ \"2009\": 5, \"2010\": numpy.array([8.0, 7.5, 6.5])}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0,", "{\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array([15, 16,", "4\": numpy.pmt(0.25, 2, 0.1), \"rate 5\": numpy.pmt(0.15, 2, 0.1521739), \"rate 6\": numpy.pmt(0.065, 2,", "29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}, \"efficient\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]),", "\"2010\": None}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr (w/", 
"{} }}}, \"mseg_out_break\": {}}}} self.sample_measure4 = { \"name\": \"sample measure 4\", \"active\": 1,", "\"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\":", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 2.23, \"2010\": 2.23}}, \"competed\": { \"all\":", "\"efficient\": {\"2009\": 10, \"2010\": 20}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\":", "\"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array( [15.1, 12.7,", "18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {", "# Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test", "\"rate 3\": 70, \"rate 4\": 80, \"rate 5\": 90, \"rate 6\": 100, \"rate", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15,", "numpy.array([22.22, 22.68, 20.11]), \"2010\": numpy.array([22.22, 22.68, 20.11])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\":", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.89, \"2010\": 8.89}}}, \"energy\":", "10.5])}}, \"competed\": { \"baseline\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}, \"efficient\": {", "6.5, 8])}}, \"competed\": { \"baseline\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}, \"efficient\":", "compete measure r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\":", "\"efficient\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}}, \"competed\":", "\"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 50, \"2010\": 100}}}, \"cost\": { \"stock\":", "\"2009\": None, \"2010\": None}}, 
\"carbon cost\": { \"residential\": { \"2009\": -100, \"2010\": -100},", "-0.5), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 5, 2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "\"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7, 7.7,", "-0.01014934, -0.007691022, -0.01262901])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.0396936, -0.04452961,", "numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2,", "\"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\":", "\"2010\": 2.59768671}, \"efficient\": { \"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": { \"2009\":", "= run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.test_adopt_scheme = \"Max", "{ \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5,", "{ \"2009\": numpy.array( [25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7,", "32])}, \"efficient\": { \"2009\": 25.5, \"2010\": numpy.array([18, 19.5, 24])}}, \"competed\": { \"baseline\": {", "2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}},", "0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all =", "\"2009\": 0, \"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}},", "\"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120}}}}] # Adjust/finalize point value", 
"given commercial measure with point value inputs.\"\"\" # Initialize test measure and assign", "\"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "{ \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\":", "\"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"carbon\":", "\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": { \"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\":", "19.53341}}, \"competed\": { \"baseline\": {\"2009\": 13.02227, \"2010\": 13.02227}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}},", "numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 5, 2.050099)]), \"2010\":", "energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}},", "\"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 5, \"2010\":", "numpy.array([11.0, 11.0, 10.5])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 23,", "\"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\":", "{ \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": {", "to be compared dict2 (dict): Second dictionary to be compared Raises: AssertionError: If", "6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}, \"carbon cost\": { \"residential\": {\"2009\": None,", "node, formatted as a point value else: self.assertAlmostEqual(i, i2, places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure", "test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) # Verify test measure portfolio-level financial metrics", "are equal 
self.assertCountEqual(i, i2) # Continue to recursively traverse the dict self.dict_check(i, i2)", "\"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\":", "\"\"\" # Import code to be tested import run # Import needed packages", "= { \"name\": \"sample compete measure c2 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\":", "'electricity (grid)', 'cooling', 'demand', 'lighting gain', 'existing')) cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1", "\"total\": { \"baseline\": {\"2009\": 41.65950, \"2010\": 41.65950}, \"efficient\": {\"2009\": 27.77300, \"2010\": 27.77300}}, \"competed\":", "objects. a_run (object): Analysis engine object incorporating all 'measures_all' objects. measures_all_dist (list): List", "correctly adjusts any secondary markets associated with these primary market microsegments. Attributes: handyvars", "[\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"resistance heat\",", "enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1_dist,", "\"baseline\": { \"2009\": numpy.array([ 3.340502, 14.65534, 0.02890102]), \"2010\": numpy.array([ 3.340502, 14.65534, 0.02890102])}, \"efficient\":", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -135, \"rate 2\": -140,", "\"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 0.9607843,", "\"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\":", "{ \"total\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": {", "Verify that cashflow inputs generate expected prioritization metric outputs. 
Attributes: handyvars (object): Useful", "\"\"\"Test operation of 'out_break_walk' function. Verify that function properly applies a climate zone/building", "that should be generated given 'ok_master_mseg_dist1' with a residential sample measure. ok_out_dist2 (dict):", "keys are equal self.assertCountEqual(i, i2) # Continue to recursively traverse the dict self.dict_check(i,", "[ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1", "\"2010\": 20}, \"measure\": { \"2009\": numpy.array([17.77, 10.23, 19.98]), \"2010\": numpy.array([17.77, 10.23, 19.98])}}, \"competed\":", "{\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\":", "all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\",", "{ \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])},", "6.612039, 4.915578])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 4.442382, 8.824726, 5.647891,", "8.02}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341,", "version of sample residential demand-side cooling measure 1 including lists of energy/carbon and", "\"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 60, \"2010\": 40}}, \"competed\": { \"baseline\":", "0].update_results, self.ok_out_dist3[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify test", "{ \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, 
cls.overlap_key_scnd: {", "-65, \"rate 6\": -70, \"rate 7\": -75}, \"2010\": { \"rate 1\": -40, \"rate", "of energy/carbon and associated cost input values instead of point values. compete_meas2 (dict):", "{ \"baseline\": {\"2009\": 41.65950, \"2010\": 41.65950}, \"efficient\": {\"2009\": 27.77300, \"2010\": 27.77300}}, \"competed\": {", "\"stock cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.4345794), \"2010\": numpy.pmt(0.07, 2, 0.2009346)},", "test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test output given residential", "29.98073])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975,", "{ \"2009\": numpy.array([ 1.670251, 7.816181, 0.01637724]), \"2010\": numpy.array([ 1.670251, 7.816181, 0.01637724])}}, \"competed\": {", "7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}}, \"carbon\": { \"savings (total)\": { \"2009\":", "{ \"2009\": { \"rate 1\": -435, \"rate 2\": -440, \"rate 3\": -145, \"rate", "point value test measure consumer metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] =", "\"efficient\": { \"2009\": 0, \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": {", "0.4909346), numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy", "{\"2009\": None, \"2010\": None}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44},", "dicts that should be generated given valid sample inputs. 
ok_out_array (list): Other financial", "self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize", "run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars, **sample_measure)] cls.a_run = run.Engine(handyvars, measure_list) cls.ok_total", "{ \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0,", "-0.01613170]), \"2010\": numpy.array([ -0.01145724, -0.01084246, -0.01014934, -0.007691022, -0.01262901])}, \"cce (w/ carbon cost benefits)\":", "numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\":", "self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on sample supply-side", "1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}}, \"irr (w/ energy costs)\":", "\"2010\": 2.59768671}, \"efficient\": {\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": {\"2009\": 1.29884336, \"2010\":", "100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([6, 7, 1, 16, 1]), \"2010\": numpy.array([36,", "\"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -435,", "costs)\": {\"2009\": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]), \"2010\": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])},", "0.009633673])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501,", "(annual)\": {\"2009\": 100, \"2010\": 100}, \"cost savings (total)\": {\"2009\": 10, \"2010\": 15}, \"cost", "\"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows',", "\"rate 2\": 110, \"rate 3\": 120, \"rate 4\": 
130, \"rate 5\": 140, \"rate", "where in the case of a dict, the first item # in the", "8.824726, 5.647891, 5.501689, 4.082098]), \"2010\": numpy.array([ 8.446248, 11.795815, 6.327488, 10.343948, 7.801544])}, \"payback (w/", "\"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array( [15.1, 12.7,", "{ \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}, \"efficient\": { \"2009\": 25.5,", "4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -170}}}}, { \"stock", "scheme. overlap_key (string): First sample string for competed primary market microsegment key chain", "status, savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_point' with", "2.223862), numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07, 2, 1.356014)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.346974), numpy.pmt(0.07,", "-4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}, \"cost savings (annual)\": { \"2009\":", "\"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"windows\"], \"technology_type\": {\"primary\":", "\"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 36}, \"efficient\": {\"2009\":", "{\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": {\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": {\"2009\":", "self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2])", "numpy.array([ 0.3344482, 0.3194888, 0.3533569, 0.3472222, 0.3636364])}, \"payback (w/ energy and carbon costs)\": {", "\"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, 
\"measure\": 1}},", "should be generated given 'ok_master_mseg_point' with a residential sample measure. ok_out_dist1 (dict): Measure", "numpy.array([ 4.713113, 4.884221, 5.309580, 2.908860, 5.394281]), \"2010\": numpy.array([ 4.601286, 4.897553, 4.260683, 4.367373, 4.089454])},", "{ \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}},", "'measures_all' with secondary microsegments to adjust. a_run (object): Analysis engine object incorporating all", "numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\":", "{\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6,", "('substituted entry', 5.2) # In this structure, k and k2 are the keys", "key 1\": [0.5, 0.2, 0.3, 0.4, 0.5], \"nested key 2\": 2}, \"key 2\":", "version of sample commercial supply-side lighting measure 1 including lists stock cost input", "\"mseg_out_break\": {}}}} self.sample_measure2 = { \"name\": \"sample measure 2\", \"active\": 1, \"market_entry_year\": None,", "\"rate 4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}, {", "11.795815, 6.327488, 10.343948, 7.801544])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.255, 0.1350000, 0.2050000,", "22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6,", "\"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\":", "{ \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}},", "\"2009\": { \"rate 1\": 100, \"rate 2\": 110, \"rate 3\": 120, \"rate 4\":", "-75}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None }, \"commercial\":", "{ \"baseline\": { \"2009\": 0, \"2010\": numpy.array([36, 30, 18])}, \"efficient\": { \"2009\": 0,", "2, 0.5145794), 
numpy.pmt(0.07, 2, 0.3845794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07, 2, 0.5159346),", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17, \"2010\": 12}},", "**self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3 # Create Engine instance using test measure, run", "\"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\":", "# zip_longest() will use the fill value created below as a # substitute", "\"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "\"2010\": 0}}, \"adjusted energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\":", "\"2010\": numpy.array([36, 45, 61, 5, 54])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200,", "measure_list (list): List for Engine including one sample residential measure. 
ok_cashflows (list): Set", "\"efficient\": {\"2009\": 11.5, \"2010\": 11}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}]", "adoption potential' cls.ok_rate = 0.07 cls.ok_master_mseg_point = { \"stock\": { \"total\": { \"all\":", "\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -40, \"rate 2\":", "\"efficient\": { \"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": { \"2009\": 1.29884336, \"2010\":", "{}}}} cls.compete_meas3 = { \"name\": \"sample compete measure r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single", "numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0, 0.001808835, 1.920664]), \"2010\": numpy.array([", "-0.01389378, -0.01422262, -0.01238981, -0.01613170]), \"2010\": numpy.array([ -0.01145724, -0.01084246, -0.01014934, -0.007691022, -0.01262901])}, \"cce (w/", "payback, and # cost of conserved energy/carbon outputs for ind, x in enumerate(self.ok_out_array):", "or has different key names self.assertEqual(k, k2) # If the recursion has not", "-0.04935749, \"2010\": -0.08611353}, \"ccc\": {\"2009\": -1.602415e-08, \"2010\": -1.111353e-08}, \"ccc (w/ energy cost benefits)\":", "4.713113, 4.884221, 5.309580, 2.908860, 5.394281]), \"2010\": numpy.array([ 4.601286, 4.897553, 4.260683, 4.367373, 4.089454])}, \"payback", "AssertionError: If dictionaries are not equal. \"\"\" # zip() and zip_longest() produce tuples", "captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3_dist = { \"name\": \"sample compete measure r3", "2\": 110, \"rate 3\": 115, \"rate 4\": 120, \"rate 5\": 125, \"rate 6\":", "Sample analysis engine object. ok_total (dict): Sample unpartitioned measure results data. 
ok_partitions (dict):", "0.1, 0.1, 0.4]}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total", "2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07,", "\"2010\": .10}, \"Cooling\": {\"2009\": .15, \"2010\": .15}}, \"Commercial\": { \"Heating\": {\"2009\": .20, \"2010\":", "results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"],", "numpy.array([ 0.027285, 0.019795, -0.02023954, -0.02715319, -0.05525120])}, \"cce (w/ carbon cost benefits)\": { \"2009\":", "{ \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": numpy.array([11.11, 11.34, 10.05]), \"2010\":", "{\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\": 25.5, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\":", "11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}, \"carbon\": { \"total\": {", "= run.Measure(self.handyvars, **self.sample_measure) # Test for correct data types in measure markets attribute", "6, 7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\":", "\"2010\": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])}, \"irr (w/ energy and carbon costs)\": {\"2009\":", "\"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure r2\", \"climate_zone\":", "1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\":", "1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)',", "numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07, -9.855809e-08])}}, { \"anpv\": { \"stock cost\": { 
\"residential\":", "7])}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array([0, 1,", "\"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}}}, \"energy cost\": { \"residential\":", "sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments", "10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\":", "26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}, \"efficient\": { \"2009\": numpy.array([ 19.53341,", "\"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 100, \"2010\": 100}, \"cost", "10.55592, 10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_res(self):", "not equal. \"\"\" # zip() and zip_longest() produce tuples for the items #", "\"efficient\": {\"2009\": 0, \"2010\": 50}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\":", "35, \"2010\": 35}}, \"Commercial\": { \"Heating\": {\"2009\": 40, \"2010\": 40}, \"Cooling\": {\"2009\": 45,", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4090909), \"rate 2\":", "0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test output given residential measure", "consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the", "\"\"\"Test for correct function output given valid input.\"\"\" # Instantiate measure measure_instance =", "r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\":", "# Verify 
test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) # Verify test", "20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113,", "0.002333333, -0.04935749, -0.04935749, -0.0802776]), \"2010\": numpy.array([ -0.021500000, -0.021500000, -0.08611353, -0.08611353, -0.1247637])}, \"ccc\": {", "31.66775}}, \"competed\": { \"baseline\": {\"2009\": 21.11183, \"2010\": 21.11183}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}},", "\"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": numpy.array([22.22, 22.68, 20.11]),", "in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test operation of 'out_break_walk' function.", "shares and updates master microsegments for a series of competing residential measures; and", "10.343948, 8.181351])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]),", "\"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]),", "0.1, 0.1, 0.1, 0.1, 0.4]}}, cls.overlap_key_scnd: { \"rate distribution\": {}}}, \"secondary mseg adjustments\":", "numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\":", "16, 17]), \"2010\": numpy.array([15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10},", "{ \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\":", "\"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\": 
numpy.array( [15.1, 12.7, 14.1, 14.2,", "\"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\":", "Set information needed to finalize array test measure consumer # metrics consumer_metrics_final_dist =", "cls.attribute_dict = measure_instance.__dict__ def test_attributes(self): \"\"\"Compare object attributes to keys from input dict.\"\"\"", "42])}, \"efficient\": { \"2009\": 34.5, \"2010\": numpy.array([33, 33, 31.5])}}, \"competed\": { \"baseline\": {", "NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'convert_to_numpy' function. Verify that the function", "\"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\":", "2, 0.1), \"rate 5\": numpy.pmt(0.15, 2, 0.1521739), \"rate 6\": numpy.pmt(0.065, 2, 0.2042254), \"rate", "7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array(", "key chain being tested. adjust_key2 (string): Second sample string for competed demand-side and", "0.2008032, 0.1901141, 0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2 = [{ \"savings and portfolio metrics\": {", "\"residential\": { \"2009\": 120, \"2010\": 120}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy", "in all tests below.\"\"\" def dict_check(self, dict1, dict2): \"\"\"Check the equality of two", "{ \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}}, \"irr (w/ energy costs)\": {", "energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"original energy (competed and", "\"2010\": 5}, \"measure\": { \"2009\": numpy.array([1.11, 4.89, 0.01]), \"2010\": numpy.array([1.11, 4.89, 0.01])}}}, \"energy\":", "energy cost savings. ok_csave (int): Sample measure avoided carbon emissions. 
ok_ccostsave (int): Sample", "\"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize test measure and", "\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\":", "105, \"rate 6\": 110, \"rate 7\": 115}, { \"rate 1\": 205, \"rate 2\":", "Demand-side subset of 'measures_all'. measures_supply (list): Supply-side subset of 'measures_all'. measures_overlap1 (dict): List", "and cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on", "given valid input.\"\"\" # Instantiate measure measure_instance = run.Measure(self.handyvars, **self.sample_measure) # Test for", "23, \"2010\": 22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.5,", "Alternative version of sample commercial supply-side lighting measure 1 including lists stock cost", "('ok_master_mseg_point'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"]", "carbon emissions. ok_ccostsave (int): Sample measure avoided carbon costs. 
ok_out_dicts (list): Output annuity", "{ \"2009\": -150, \"2010\": -150}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\":", "0.10800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.1133333, 0.08222222, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3 =", "\"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}, \"cost", "\"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\":", "11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\": 34.5,", "numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}, \"carbon cost\":", "{ \"baseline\": {\"2009\": 10, \"2010\": 16}, \"efficient\": {\"2009\": 20, \"2010\": 8}}, \"competed\": {", "2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "\"rate 2\": numpy.pmt(1.0, 2, -0.125), \"rate 3\": numpy.pmt(0.45, 2, 0.01724138), \"rate 4\": numpy.pmt(0.25,", "1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\": {", "\"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}}, \"lifetime\":", "\"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 0.865895571, 0.01085301, 6.722325]), \"2010\":", "# Run measure competition routine on sample measures self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) #", "cls.ok_rate = 0.07 cls.ok_master_mseg_point = { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226,", "\"savings\": {}, \"total\": {}}}, 
\"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure", "self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments for each sample", "30, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": {", "sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life = 3 cls.ok_product_lifetime = 6.2", "\"uncompeted\") # For first test case, verify correct adoption/competition scenario # keys for", "{}, \"original energy (competed and captured)\": {}, \"adjusted energy (total captured)\": {}, \"adjusted", "{\"2009\": 25, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\":", "\"2009\": numpy.array([95, 100, 90]), \"2010\": numpy.array([95, 100, 90])}, \"commercial\": { \"2009\": None, \"2010\":", "\"sample measure 3 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\":", "\"2010\": numpy.array([ 0.865895571, 0.01085301, 6.722325])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176,", "generates expected payback output. Attributes: handyvars (object): Useful variables across the class. measure_list", "-4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08, 1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc (w/ energy cost benefits)\":", "equal. \"\"\" # zip() and zip_longest() produce tuples for the items # identified,", "\"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4 = { \"name\": \"sample compete measure r4\", \"climate_zone\":", "(object): Sample measure data with lists to convert. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define", "\"rate 4\": 120, \"rate 5\": 125, \"rate 6\": 10, \"rate 7\": 135}])}}, \"energy", "2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "Sample commercial supply-side lighting measure 2. compete_meas3 (dict): Sample commercial supply-side lighting measure", "test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine", "{ \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -400, \"2010\":", "cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize array test measure", "193.9])}, \"savings (annual)\": { \"2009\": numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]), \"2010\": numpy.array([49.4, 41.3,", "{\"2009\": 22.22366, \"2010\": 22.22366}, \"efficient\": {\"2009\": 11.11183, \"2010\": 11.11183}}, \"competed\": { \"baseline\": {\"2009\":", "Verify that measure master microsegment inputs yield expected savings and financial metrics outputs.", "captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"original energy (competed and captured)\": {", "{\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 3.340502, \"2010\": 3.340502},", "{ \"stock\": { \"cost savings (total)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),", "**self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using test measure, run", "7\": -0.75}}}, \"carbon cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "\"competed choice parameters\": {}, \"secondary mseg adjustments\": { \"market share\": { \"original energy", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.5), 
numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07,", "8.886499}}, \"competed\": { \"baseline\": {\"2009\": 8.886499, \"2010\": 8.886499}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "20.29000])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341,", "{ \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])},", "that 'secondary_adj' correctly adjusts any secondary markets associated with these primary market microsegments.", "financial metric values that should be generated given valid sample inputs. \"\"\" @classmethod", "(object): Engine object incorporating all 'measures_all_dist' objects. measure_master_msegs_out (dict): Master market microsegments that", "2, 3, 4], [-10, 0, 1, 2], [10, 4, 7, 8, 10], [-100,", "\"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "25}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10,", "{ \"2009\": numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08, 1.9795e-08, -2.023954e-08,", "Attributes: handyvars (object): Useful variables across the class. sample_measure (object): Sample measure data", "values instead of point values. 
measures_all (list): List of all competing measures with", "90, \"rate 6\": 100, \"rate 7\": 110}}}, \"energy cost\": { \"residential\": { \"2009\":", "\"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}, \"Max adoption potential\": {", "{ \"2009\": numpy.array([ 0.03566667, 0.03566667, -0.01602415, -0.01602415, -0.04694426]), \"2010\": numpy.array([ 0.05350000, 0.05350000, -0.01111353,", "**x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist =", "\"rate 1\": numpy.pmt(10.0, 2, -0.4318182), \"rate 2\": numpy.pmt(1.0, 2, -0.125), \"rate 3\": numpy.pmt(0.45,", "numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 0.9607843, 2.703704,", "\"2010\": numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\":", "associated contributing microsegment keys that overlap with 'measures_demand_dist' Measure objects. measures_overlap2_dist (dict): List", "\"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "use partition to a total energy or carbon market/savings value. Attributes: a_run (object):", "c2 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary", "Sample number of competed units. ok_base_life (int): Sample baseline technology lifetime. 
ok_product_lifetime (float):", "{ \"baseline\": {\"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"cost\": {", "2.59768671}, \"efficient\": { \"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": { \"2009\": 1.29884336,", "\"baseline\": { \"2009\": 10, \"2010\": numpy.array([16, 15, 13])}, \"efficient\": { \"2009\": 20, \"2010\":", "\"rate 7\": -120}, \"2010\": { \"rate 1\": -90, \"rate 2\": -95, \"rate 3\":", "[\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\", \"cooling\"]}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\",", "{ \"total\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}},", "use across all class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure", "{ \"2009\": 95, \"2010\": 95}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\":", "{ \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\": 0, \"2010\": 18}}, \"competed\": {", "4.915578])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 4.442382, 8.824726, 5.647891, 5.501689,", "\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 0.865895571, \"2010\": 0.865895571}}, \"competed\": { \"baseline\":", "\"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}}, \"irr (w/ energy costs)\":", "array inputs.\"\"\" # Initialize test measure and assign it a sample 'uncompeted' #", "0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 5, 2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07,", "family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"windows\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\":", "correct adoption/competition scenario # keys for measure 
markets/savings/portfolio metrics for adopt_scheme in self.handyvars.adopt_schemes:", "1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments for", "-55, \"rate 4\": -60, \"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75}}}},", "-9.523954e-08, -1.021532e-07, -1.302512e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([", "0.1, 0.1, 0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}},", "for a series of competing commercial measures; and that 'secondary_adj' correctly adjusts any", "{ \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"carbon\": {", "-0.5}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2, 0.5625),", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 2, 0.9040091),", "\"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 45, \"2010\": 45}}, \"competed\": { \"baseline\":", "11, 124])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([6,", "\"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([10, 12, 14])}}, \"competed\": { \"baseline\":", "{ \"baseline\": {\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": {\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": {", "\"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.04958678), \"rate 2\": numpy.pmt(1.0, 2,", "-5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}}, \"energy\": { \"savings (total)\": {\"2009\": 150,", "2, 0.9040091), numpy.pmt(0.07, 5, 2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346),", "2, 0.5625), \"rate 
3\": numpy.pmt(0.45, 2, 0.8739596), \"rate 4\": numpy.pmt(0.25, 2, 1.08), \"rate", "\"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key2: {", "{\"primary\": [\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": \"supply\"}, \"technology\": {\"primary\": [\"resistance", "\"rate 5\": numpy.pmt(0.15, 2, 0.8128544), \"rate 6\": numpy.pmt(0.065, 2, 0.9103132), \"rate 7\": -0.5},", "6.824341, 5.072499])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 16.04455,", "# heating and cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition", "{ \"rate 1\": -90, \"rate 2\": -95, \"rate 3\": -100, \"rate 4\": -105,", "5, 3])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\":", "numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 5, 4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1,", "sample measure. 
ok_out_point_com (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics", "0.4345794), numpy.pmt(0.07, 2, 0.4345794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07,", "of # heating and cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure", "\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 15, \"2010\": 5}}}}, \"lifetime\": { \"baseline\":", "-4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": {", "{ \"2009\": numpy.array([ 0.2392344, 0.2347418, 0.2242152, 0.2659574, 0.2857143]), \"2010\": numpy.array([ 0.3344482, 0.3194888, 0.3533569,", "5, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": {\"2009\":", "0, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 12}, \"efficient\": {\"2009\": 0,", "-9.117156e-08, -8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07, -9.855809e-08])}}, { \"anpv\":", "\"\"\" def __init__(self): self.sample_measure = { \"name\": \"sample measure 1\", \"active\": 1, \"market_entry_year\":", "-1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08, -8.216772e-08, -7.592937e-08])}}, { \"anpv\": {", "cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]]", "\"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([", "numpy.array([ -8.232209e-08, -9.117156e-08, -8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07, 
-9.855809e-08])}},", "6.2 cls.ok_life_ratio = 2 cls.ok_base_scost = 1 cls.ok_meas_sdelt = -1 cls.ok_esave = 7.5", "\"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"windows\"],", "-0.01111353, -0.04976366])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.002333333, 0.002333333, -0.04935749,", "adoption potential\" cls.overlap_key = str( ('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'lighting', 'reflector (LED)',", "20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}},", "\"rate 4\": 130, \"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160}, \"2010\":", "\"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}, \"efficient\": { \"2009\":", "all 'measures_primary_dist' objects. measures_overlap (dict): List of supply-side Measure objects and associated contributing", "\"total\": { \"baseline\": { \"2009\": 69, \"2010\": numpy.array([66, 66, 63])}, \"efficient\": { \"2009\":", "2.2, 4.6])}} cls.ok_master_mseg_dist4 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "\"2010\": 0.865895571}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0, \"2010\":", "\"2010\": numpy.array([ 16.04455, 17.29736, 10.29000])}, \"efficient\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\":", "15, 13])}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([8, 9, 9.1])}}, \"competed\": { \"baseline\":", "{ \"2009\": { \"rate 1\": -350, \"rate 2\": -60, \"rate 3\": -70, \"rate", "analysis engine object. ok_total (dict): Sample unpartitioned measure results data. 
ok_partitions (dict): Sample", "{ \"Heating\": {\"2009\": .20, \"2010\": .20}, \"Cooling\": {\"2009\": .25, \"2010\": .25}}}, \"AIA CZ2\":", "21.2, 22.5])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist2 =", "\"measure\": {\"2009\": 8.5, \"2010\": 6}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\":", "of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point #", "20, \"2010\": 20}, \"measure\": {\"2009\": 17.77, \"2010\": 17.77}}, \"competed\": { \"all\": {\"2009\": 10,", "numpy.array([ 2.227001, 10.25874, 0.02119408])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.670251, 7.32767, 0.01445051]),", "\"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 16.04455, 17.29736, 10.29000]), \"2010\": numpy.array([", "30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\":", "60}}, \"competed\": { \"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}},", "\"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"cost\": { \"stock\": { \"total\": {", "\"2010\": 30}, \"measure\": {\"2009\": 23, \"2010\": 22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\":", "0.036380, 0.019260, -0.01934271, -0.01897398, -0.04613129]), \"2010\": numpy.array([ 0.027285, 0.019795, -0.02023954, -0.02715319, -0.05525120])}, \"cce", "{ \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\": { \"total\": { \"baseline\": {", "17.77300}, \"efficient\": {\"2009\": 8.886499, \"2010\": 8.886499}}, \"competed\": { \"baseline\": {\"2009\": 8.886499, \"2010\": 8.886499},", "run.UsefulInputFiles()) cls.sample_measure = { \"market_entry_year\": None, \"market_exit_year\": None, \"markets\": { \"Technical potential\": {", "{\"2009\": 
1.29884336, \"2010\": 1.29884336}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"cost\": { \"stock\": {", "{\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([0.87, 0.01, 4.80]), \"2010\": numpy.array([0.87, 0.01,", "numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]), \"2010\": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])}, \"cost savings", "{\"2009\": 6.511136, \"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "{\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "8}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 8}, \"efficient\": {\"2009\": 10, \"2010\": 0}}},", "0 cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.test_adopt_scheme = \"Max adoption potential\" cls.overlap_key = str(", "{ \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"energy\": {", "\"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\":", "\"2009\": 120, \"2010\": 120}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": {", "15, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10,", "6.943250}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 41.65950, \"2010\": 41.65950}, \"efficient\": {\"2009\": 27.77300,", "\"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array( [5,", "-1.247637e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1,", "a dict to numpy arrays. 
Attributes: handyvars (object): Useful variables across the class.", "{ \"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array(", "6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30,", "\"Cooling\": {\"2009\": .45, \"2010\": .45}}}} cls.ok_out = { \"AIA CZ1\": { \"Residential\": {", "15, \"2010\": 15}, \"measure\": {\"2009\": 11.11, \"2010\": 11.11}}}, \"energy\": { \"total\": { \"baseline\":", "2, 1.582016)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}}, \"irr", "10}, \"measure\": {\"2009\": 1.73, \"2010\": 1.73}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5},", "\"baseline\": { \"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\":", "data. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\"", "2\": 110, \"rate 3\": 120, \"rate 4\": 130, \"rate 5\": 140, \"rate 6\":", "sample measures w/ point value inputs.\"\"\" # Run the measure competition routine on", "True}, \"Max adoption potential\": { \"uncompeted\": False, \"competed\": True}}, \"consumer metrics\": False}, {", "cost and measure lifetime array. 
ok_out_point_res (dict): Measure attribute update status, savings, and", "4.09, 4.09, 4.50])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]),", "\"efficient\": { \"2009\": 20, \"2010\": numpy.array([8, 9, 9.1])}}, \"competed\": { \"baseline\": { \"2009\":", "\"efficient\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}}, \"competed\":", "{ \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105,", "measure r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None},", "numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07, 2, 1.130011)]) },", "\"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "\"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"cost\":", "cls.ok_product_lifetime = 6.2 cls.ok_life_ratio = 2 cls.ok_base_scost = 1 cls.ok_meas_sdelt = -1 cls.ok_esave", "9])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([12, 10, 6])}, \"efficient\": {", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07,", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 30,", "i2, # respectively, at the current level of the recursive # exploration of", "\"efficient\": { \"2009\": 15, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1},", "4.54, 4.54, 5.00]), \"2010\": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])}, \"payback (w/ energy costs)\":", "overlap with 'measures_demand' Measure objects. 
measures_overlap2 (dict): List of demand-side Measure objects and", "for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]] # Instantiate engine", "\"2010\": 25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": 30, \"2010\": 30}, \"Cooling\":", "2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}, \"carbon cost\": {", "scenario # keys for measure markets/savings/portfolio metrics for adopt_scheme in self.handyvars.adopt_schemes: # Markets", "6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "y) for x, y in zip([ tested_data[\"key 1\"][\"nested key 1\"], tested_data[\"key 1\"][\"nested key", "17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6, 6.5, 8])}},", "22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15},", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}}, \"energy\":", "\"2010\": numpy.array([ 1.670251, 7.816181, 0.01637724])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113,", "\"efficient\": {\"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\":", "CommonMethods(object): \"\"\"Define common methods for use in all tests below.\"\"\" def dict_check(self, dict1,", "cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test output given residential measure with point", "0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24, 26,", "sample_measure (object): Sample measure data with lists to convert. 
\"\"\" @classmethod def setUpClass(cls):", "\"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.4345794), \"2010\": numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\": {\"2009\": None,", "0.01445051])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}},", "dict structure, # the keys are equal; this should fail if one of", "'windows', 'existing'))]]} cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize point", "{ \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array( [5, 6,", "0, \"2010\": numpy.array([12, 10, 6])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}},", "2, 0.09917355), \"rate 2\": numpy.pmt(1.0, 2, 0.75), \"rate 3\": numpy.pmt(0.45, 2, 1.165279), \"rate", "numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794)]), \"2010\":", "cost benefits)\": { \"2009\": numpy.array([ -3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08,", "-105, \"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120}, \"2010\": { \"rate", "63.33550, 64.02682, 60.16002])}, \"efficient\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366,", "itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): # Confirm that at the current location in the dict", "inputs. measures_secondary_dist (list): Subset of 'measures_all_dist' with secondary microsegments to adjust. 
a_run_dist (object):", "30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize", "\"2009\": -150, \"2010\": -150}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": {", "2\": numpy.pmt(1.0, 2, 0.75), \"rate 3\": numpy.pmt(0.45, 2, 1.165279), \"rate 4\": numpy.pmt(0.25, 2,", "-0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 5, 2.040408)])},", "\"2010\": 0}}, \"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4", "1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08, 1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc (w/", "\"2009\": 51, \"2010\": numpy.array([36, 39, 48])}, \"efficient\": { \"2009\": 34, \"2010\": numpy.array([24, 26,", "captured)\": {}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}},", "series of competing residential measures; and that 'htcl_adj' properly accounts for heating and", "the dict structure, # the keys are equal; this should fail if one", "\"savings (annual)\": {\"2009\": 50, \"2010\": 50}, \"cost savings (total)\": {\"2009\": 5, \"2010\": 15},", "{ \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\":", "10, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 5,", "}, \"commercial\": { \"2009\": None, \"2010\": numpy.array([ { \"rate 1\": 85, \"rate 2\":", "keys and values\": { cls.adjust_key1: { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "run of the 'metric_update' # function function_output = engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), 
self.ok_base_scost,", "Sample residential demand-side cooling measure 1. compete_meas1_dist (dict): Alternative version of sample residential", "class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure measure_instance", "with secondary microsegments to adjust. a_run_dist (object): Analysis engine object incorporating all 'measures_primary_dist'", "Alternative version of sample residential demand-side cooling measure 1 including lists of energy/carbon", "-100, \"2010\": -100}, \"commercial\": { \"2009\": None, \"2010\": None}}}] # Adjust/finalize point value", "6.511136, \"2010\": 6.511136}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 39.06682, \"2010\": 39.06682}, \"efficient\":", "{\"2009\": 20, \"2010\": 8}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 8}, \"efficient\": {\"2009\":", "cls.test_adopt_scheme = \"Max adoption potential\" cls.adjust_key1 = str( ('primary', 'AIA_CZ1', 'single family home',", "self.ok_out_dist4[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods):", "measure master microsegment including all point values at terminal leaf nodes. 
ok_master_mseg_dist1 (dict):", "15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": {\"2009\": -0.01602415, \"2010\":", "\"2010\": numpy.array([114, 105, 89, 145, 96])}, \"cost savings (total)\": { \"2009\": numpy.array([10.9, 11.3,", "9.1])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": numpy.array([8.0, 7.5, 6.5])}, \"efficient\": {", "{ \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}, \"efficient\": {", "\"2010\": -150}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": {", "compared Raises: AssertionError: If dictionaries are not equal. \"\"\" # zip() and zip_longest()", "\"2010\": -10}}, \"energy\": { \"savings (total)\": { \"2009\": numpy.array([184, 173, 169, 194, 149]),", "15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}}, \"energy\": { \"total\": { \"baseline\":", "with 'measures_demand' Measure objects. measures_overlap2 (dict): List of demand-side Measure objects and associated", "{\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": {", "is the value; # in the case where the dicts are not of", "10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\":", "1, 1, 1, 5, 7, 8], [-10, 14, 2, 3, 4], [-10, 0,", "{ \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 50, \"rate", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 1.73,", "sample measure. 
ok_out_dist2 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics", "cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist = { \"measures\": cls.measures_all_dist[2:5], \"keys\":", "{}}}} cls.measures_all = [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]]", "30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15},", "1\": [1, 2, 3, 4, 5], \"nested key 2\": 5}, \"key 2\": 10.8},", "{ \"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand',", "\"2010\": numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc (w/ energy cost benefits)\": {", "(list): Output annuity equivalent Net Present Value dicts that should be generated given", "(object): Analysis engine object incorporating all 'measures_all' objects. 
measures_all_dist (list): List including competing/interacting", "{ \"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20,", "1, \"2010\": 1}, \"measure\": 1}}] def test_compete_res(self): \"\"\"Test outcomes given valid sample measures", "copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to", "numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None,", "{ \"2009\": numpy.array([ 16.04455, 17.29736, 10.29000]), \"2010\": numpy.array([ 16.04455, 17.29736, 10.29000])}, \"efficient\": {", "heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": [\"general service (LED)\"]}, \"markets\": { \"Technical potential\":", "10}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40,", "primary market shares and updates master microsegments for a series of competing commercial", "python3 \"\"\" Tests for running the engine \"\"\" # Import code to be", "10, \"2010\": 10}, \"measure\": {\"2009\": 8.89, \"2010\": 8.89}}}, \"energy\": { \"total\": { \"baseline\":", "\"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"carbon\":", "{ \"name\": \"sample measure 1\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None,", "15.17233, 22.48555])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.88650, 10.11489, 14.99037]), \"2010\": numpy.array([", "to keys from input dict.\"\"\" for key in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class", "self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, self.ok_csave, self.ok_ccostsave) # Test that valid inputs yield correct", "\"2010\": 10}, \"efficient\": { 
\"2009\": 10, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": {", "sample_measure3 (dict): Sample commercial measure #1. \"\"\" def __init__(self): self.sample_measure = { \"name\":", "{ \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array( [5, 6, 7]), \"2010\":", "13])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 0, \"2010\":", "5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\":", "8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": {", "10}, \"measure\": {\"2009\": 2.23, \"2010\": 2.23}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5},", "{ \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}, \"efficient\": {", "\"consumer metrics\": False}, { \"stock\": { \"cost savings (total)\": { \"2009\": numpy.array([-5.1, -2.7,", "\"2010\": 15}, \"cost savings (annual)\": {\"2009\": 10, \"2010\": 15}}, \"carbon\": { \"savings (total)\":", "{ \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}}, \"lifetime\": {\"baseline\":", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 2.23, \"2010\": 2.23}}, \"competed\": { \"all\": {\"2009\":", "-6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08, -8.216772e-08, -7.592937e-08])}}, { \"anpv\": { \"stock cost\":", "# Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) # Verify test", "adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\":", "0.01, 4.80])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]),", "below.\"\"\" def dict_check(self, dict1, dict2): \"\"\"Check the equality of two dicts. 
Args: dict1", "{ \"2009\": numpy.array([ 3.648926, 3.737086, 3.956335, 3.180956, 2.886001]), \"2010\": numpy.array([ 2.425032, 2.584709, 2.240438,", "\"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array( [20, 21, 22]),", "\"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"carbon\": { \"total\":", "1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon", "(total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 100, \"2010\": 100}, \"cost savings", "31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}, \"efficient\": { \"2009\": numpy.array([ 10.55592,", "1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"carbon\": { \"total\": { \"baseline\":", "\"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([1.11, 4.89, 0.01]), \"2010\": numpy.array([1.11,", "-200, -100]), \"2010\": numpy.array([-150, -200, -100])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon", "home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing')) cls.adjust_key2 = str( ('primary', 'AIA_CZ1', 'single", "Subset of 'measures_all' with secondary microsegments to adjust. a_run (object): Analysis engine object", "\"2009\": 46, \"2010\": numpy.array([44, 44, 42])}}, \"competed\": { \"baseline\": { \"2009\": 34.5, \"2010\":", "captured)\": {}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}", "{ \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}, \"cost\": {", ".25, \"2010\": .25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": .30, \"2010\": .30},", "Measure objects with point value inputs. measures_demand (list): Demand-side subset of 'measures_all'. 
measures_supply", "2}} cls.ok_master_mseg_dist1 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20},", "numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"carbon\": { \"total\": {", "\"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\":", "overlap data. adjust_key1 (string): First sample string for competed demand-side and supply-side market", "\"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\":", "ok_master_mseg_point (dict): Sample measure master microsegment including all point values at terminal leaf", "300}, \"efficient\": { \"2009\": numpy.array([16, 27, 31, 6, 51]), \"2010\": numpy.array([106, 95, 81,", "18.7, 21.7, 19.2, 20.5]) }}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\":", "(dict): Sample measure master microsegment including all point values at terminal leaf nodes.", "numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}, \"competed\":", "focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3", "home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\",", "\"ccc\": { \"2009\": numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08, -2.853592e-08,", "for the items # identified, where in the case of a dict, the", "\"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\":", "30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 
1, \"2010\": 1}, \"measure\":", "-400}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\":", "{ \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": {", "\"measure\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 10}, \"measure\":", "5.114887, 9.990366])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([", "numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "for use in all tests below.\"\"\" def dict_check(self, dict1, dict2): \"\"\"Check the equality", "{\"2009\": 30, \"2010\": 30}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\":", "(string): Key used to link primary and secondary market microsegments (by climate, building", "\"rate 2\": -60, \"rate 3\": -70, \"rate 4\": -380, \"rate 5\": -390, \"rate", "\"2009\": numpy.array([1.73, 0.02, 9.60]), \"2010\": numpy.array([1.73, 0.02, 9.60])}}, \"competed\": { \"all\": {\"2009\": 5,", "-0.4318182), \"rate 2\": numpy.pmt(1.0, 2, -0.125), \"rate 3\": numpy.pmt(0.45, 2, 0.01724138), \"rate 4\":", "-0.0396936, -0.04452961, -0.05150073, -0.006204243, -0.09331291]), \"2010\": numpy.array([ -0.1140346, -0.11474490, -0.09371098, -0.072742925, -0.11206083])}, \"ccc\":", "60, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30,", "measure r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], 
\"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None},", "{ \"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30,", "-150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370}, \"2010\": { \"rate", "1}, \"sub-market scaling\": 1}}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector", "numpy.array([ 17.77300, 10.22977, 19.98073])}, \"efficient\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([", "0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure( cls.handyvars, **x) for x in", "\"2010\": numpy.array([106, 95, 81, 11, 124])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150},", "sample_measure = CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows = [[-10, 1, 1, 1,", "\"2010\": 20}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([16, 15, 13])}}, \"competed\": { \"all\":", "{}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}},", "# Check updated competed master microsegments for each sample measure # following competition/secondary", "0.4672897), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 5, 2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "potential\" cls.overlap_key = str( ('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'lighting', 'reflector (LED)', 'existing'))", "tested import run # Import needed packages import unittest import numpy import copy", "\"2009\": numpy.array([2.23, 9.77, 0.02]), \"2010\": numpy.array([2.23, 9.77, 0.02])}}, \"competed\": { \"all\": {\"2009\": 5,", "places=2) # At the terminal/leaf node, formatted as a point value else: self.assertAlmostEqual(i,", "1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": { \"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {", "\"baseline\": {\"2009\": 20.82975, 
\"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"cost\": { \"stock\":", "7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10},", "metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3])", "-8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07, -9.855809e-08])}}, { \"anpv\": { \"stock", "cost\": { \"residential\": { \"2009\": -400, \"2010\": -400}, \"commercial\": { \"2009\": None, \"2010\":", "(w/ energy and carbon costs)\": {\"2009\": numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]), \"2010\": numpy.array([0.33,", "\"secondary\": None}, \"technology\": [\"windows\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None,", "{\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([16.04, 17.30, 10.29]), \"2010\": numpy.array([16.04, 17.30,", "\"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {\"2009\": 1.29884336, \"2010\":", "function. 
Verify that the function converts terminal/leaf node lists in a dict to", "\"2009\": 11.5, \"2010\": numpy.array([11, 11, 10.5])}}, \"competed\": { \"baseline\": { \"2009\": 11.5, \"2010\":", "35}}, \"Commercial\": { \"Heating\": {\"2009\": 40, \"2010\": 40}, \"Cooling\": {\"2009\": 45, \"2010\": 45}}}}", "{\"2009\": 8.5, \"2010\": 6}}, \"competed\": { \"baseline\": {\"2009\": 8.5, \"2010\": 6}, \"efficient\": {\"2009\":", "\"efficient\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}}, \"competed\":", "that should be generated for each Measure object in 'measures_all_dist' following competition and", "1\"][\"nested key 1\"], tested_data[\"key 1\"][\"nested key 2\"], tested_data[\"key 2\"]], [numpy.ndarray, int, float])])) #", "partitioning fraction. ok_out (dict): Sample partitioned measure results data. \"\"\" @classmethod def setUpClass(cls):", "1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.overlap_key:", "'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2 = {", "contributing microsegment keys that overlap with 'measures_supply' Measure objects. 
a_run (object): Analysis engine", "0.4]}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\": {", "cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1 = { \"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family", "80, \"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110}}}, \"energy cost\": {", "-390, \"rate 6\": -150, \"rate 7\": -400}}}, \"carbon cost\": { \"residential\": { \"2009\":", "the terminal/leaf node, formatted as a point value else: self.assertAlmostEqual(i, i2, places=2) class", "{ \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\":", "-150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -170}}}}, { \"stock cost\":", "(w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr (w/ energy and carbon", "of supply-side Measure objects and associated contributing microsegment keys that overlap with 'measures_demand_dist'", "\"competed\": { \"baseline\": {\"2009\": 8.022273, \"2010\": 8.022273}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "100, 90]), \"2010\": numpy.array([95, 100, 90])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy", "\"2009\": 0, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\":", "0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}}, \"secondary mseg", "choice parameters\": {}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total", "8.5, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}},", "numpy.array([ 0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.1133333, 0.08222222, 0.1488889, 0.09333333, 0.1222222])}}]", "\"rate 2\": 100, 
\"rate 3\": 105, \"rate 4\": 110, \"rate 5\": 115, \"rate", "captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {},", "# At the terminal/leaf node, formatted as a point value else: self.assertAlmostEqual(i, i2,", "for correct function output given valid inputs.\"\"\" dict1 = self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2", "and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key2: { \"2009\": 0, \"2010\":", "\"rate 7\": -0.75}}}, \"carbon cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": {", "None}}}, { \"stock cost\": { \"residential\": { \"2009\": numpy.array([95, 100, 90]), \"2010\": numpy.array([95,", "\"efficient\": {\"2009\": 8.5, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\":", "numpy.array([ 0.05350000, 0.05350000, -0.01111353, -0.01111353, -0.04976366])}, \"cce (w/ carbon cost benefits)\": { \"2009\":", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 3.340502, 14.65534, 0.02890102]), \"2010\": numpy.array([ 3.340502,", "{ \"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\":", "numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([", "{ \"2009\": numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]), \"2010\": numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])},", "{ \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array(", "0.01, 4.80]), \"2010\": numpy.array([0.87, 0.01, 4.80])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "0}}, \"original energy (competed and captured)\": { 
cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted", "\"2009\": numpy.array([ 13.02227, 13.64868, 10.14500]), \"2010\": numpy.array([ 13.02227, 13.64868, 10.14500])}, \"efficient\": { \"2009\":", "measure. ok_out_dist4 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that", "objects and associated contributing microsegment keys that overlap with 'measures_demand_dist' Measure objects. measures_overlap2_dist", "100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4 = { \"name\": \"sample compete measure r4\", \"climate_zone\": [\"AIA_CZ1\"],", "{ \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([", "import numpy import copy import itertools import os class CommonTestMeasures(object): \"\"\"Class of common", "1.202332), numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07, 2, 1.130011)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5),", "(grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\":", "discount rate. 
ok_master_mseg_point (dict): Sample measure master microsegment including all point values at", "fail if one of the dicts # is empty, is missing section(s), or", "numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07,", "\"measure\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"energy\": { \"total\": { \"baseline\":", "[\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\":", "{\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": {", "{ \"total\": { \"baseline\": {\"2009\": 63.33550, \"2010\": 63.33550}, \"efficient\": {\"2009\": 42.22366, \"2010\": 42.22366}},", "cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows = [[-10, 1, 1, 1, 1, 5, 7,", "{ \"baseline\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])},", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -350, \"rate 2\": -60,", "numpy.array([ 42.22366, 42.68455, 40.10668])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]),", "{ \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}},", "that should be generated given 'ok_master_mseg_point' with a residential sample measure. ok_out_point_com (dict):", "and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key1: { \"2009\": 0, \"2010\":", "attribute_dict (dict): Dict of sample measure attributes. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables", "17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}, \"competed\": { \"baseline\": {\"2009\": 8.5,", "15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}},", "5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 1,", "{ \"baseline\": {\"2009\": 5, \"2010\": 8}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": {", "3\": numpy.pmt(0.45, 2, 1.165279), \"rate 4\": numpy.pmt(0.25, 2, 1.44), \"rate 5\": numpy.pmt(0.15, 2,", "-120}}}}] # Adjust/finalize point value test measure consumer metrics for ind, m in", "numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07, 2, 1.356014)]), \"2010\":", "\"2010\": 11}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 23, \"2010\": 22},", "{ \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"energy\": { \"total\": { \"baseline\": {", "\"2010\": 15}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "\"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\":", "20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}}, \"carbon\": { \"total\": { \"baseline\":", "\"2010\": 19.53341}}, \"competed\": { \"baseline\": {\"2009\": 13.02227, \"2010\": 13.02227}, \"efficient\": {\"2009\": 6.511136, \"2010\":", "{\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\":", "a_run_dist (object): Engine object incorporating all 'measures_all_dist' objects. 
measure_master_msegs_out (dict): Master market microsegments", "7\": -170}, \"2010\": { \"rate 1\": -135, \"rate 2\": -140, \"rate 3\": -145,", "1\": numpy.pmt(10.0, 2, -0.4090909), \"rate 2\": numpy.pmt(1.0, 2, 0), \"rate 3\": numpy.pmt(0.45, 2,", "\"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'multi family home', 'electricity", "measures_supply_dist (list): Supply-side subset of 'measures_all_dist'. measures_overlap1_dist (dict): List of supply-side Measure objects", "{ \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": {", "{ \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}}, \"energy\": {", "-0.02853592, -0.02023954, -0.02715319, -0.02355809])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.04898876,", "\"cost savings (total)\": {\"2009\": 5, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\":", "{\"2009\": 8.5, \"2010\": 6}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11, 11, 10.5])}}, \"competed\": { \"baseline\": {", "(dict): First dictionary to be compared dict2 (dict): Second dictionary to be compared", "0.5567503}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\":", "\"Heating\": {\"2009\": .40, \"2010\": .40}, \"Cooling\": {\"2009\": .45, \"2010\": .45}}}} cls.ok_out = {", "\"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\":", "\"2009\": numpy.array([ 21.11183, 21.34227, 20.05334]), \"2010\": numpy.array([ 21.11183, 21.34227, 20.05334])}, \"efficient\": { \"2009\":", "{ \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 20,", "that 'htcl_adj' properly accounts for heating and cooling supply-demand 
overlaps. Attributes: handyvars (object):", "{ \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-50, -100, -10])}, \"commercial\": { \"2009\": None,", "{ \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"cost\": {", "\"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "2, 0.4345794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346),", "\"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1 # Create", "measure with array inputs.\"\"\" # Initialize test measure and assign it a sample", "2.040408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\":", "0.432947785, \"2010\": 0.432947785}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 1.73179114,", "\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 100, \"rate 2\":", "-190, \"rate 2\": -195, \"rate 3\": -190, \"rate 4\": -205, \"rate 5\": -180,", "Sample baseline technology lifetime. ok_product_lifetime (float): Sample measure lifetime. 
ok_life_ratio (int): Sample measure->baseline", "0.865895571}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 2.59768671,", "= [{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", "10}, \"measure\": { \"2009\": numpy.array([8.89, 5.11, 9.99]), \"2010\": numpy.array([8.89, 5.11, 9.99])}}}, \"energy\": {", "numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([", "{ \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07, 2,", "Create Engine instance using test measure, run function on it engine_instance = run.Engine(self.handyvars,", "self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'calc_savings_metrics' function. Verify", "\"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"energy\":", "\"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": \"supply\"}, \"technology\": {\"primary\":", "(grid)', 'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling',", "20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10,", "lighting measure 2. compete_meas3 (dict): Sample commercial supply-side lighting measure 3. compete_meas_dist (dict):", "lists stock cost input values instead of point values. 
measures_all (list): List of", "information needed to finalize array test measure consumer # metrics consumer_metrics = [{", "and carbon costs)\": {\"2009\": numpy.array([ 0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.17,", "\"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_out_point_res", "\"room AC\"], \"secondary\": [\"general service (LED)\"]}, \"markets\": { \"Technical potential\": { \"master_mseg\": {},", "numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}, \"carbon\":", "\"baseline\": { \"2009\": 0, \"2010\": numpy.array([12, 10, 6])}, \"efficient\": { \"2009\": 0, \"2010\":", "residential measure. ok_cashflows (list): Set of sample input cash flows. ok_out (list): Outputs", "cls.ok_cashflows = [[-10, 1, 1, 1, 1, 5, 7, 8], [-10, 14, 2,", "1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\":", "7.32767, 0.01445051])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068,", "\"2010\": 10}, \"measure\": {\"2009\": 8.02, \"2010\": 8.02}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "{ cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (competed and captured)\": { cls.secnd_adj_key:", "None, \"markets\": { \"Technical potential\": { \"key 1\": { \"nested key 1\": [1,", "{ \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}, \"competed\": { \"baseline\": {\"2009\":", "\"ccc\": { \"2009\": numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08, 1.9795e-08,", "\"baseline\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}, \"efficient\":", "66, 63])}, \"efficient\": { \"2009\": 46, \"2010\": numpy.array([44, 
44, 42])}}, \"competed\": { \"baseline\":", "home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"supply\", \"secondary\": None},", "\"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120}, \"2010\": { \"rate 1\":", "an Engine instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Record the", "{\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array( [15,", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0,", "\"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2,", "\"2010\": 300}, \"efficient\": { \"2009\": numpy.array([16, 27, 31, 6, 51]), \"2010\": numpy.array([106, 95,", "potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30},", "{ \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0,", "\"2009\": 25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5,", "delta. ok_esave (int): Sample measure energy savings. ok_ecostsave (int): Sample measure energy cost", "\"cost savings (annual)\": {\"2009\": 10, \"2010\": 15}}, \"carbon\": { \"savings (total)\": {\"2009\": 150,", "competed demand-side and supply-side market microsegment key chain being tested. 
compete_meas1 (dict): Sample", "savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": { \"2009\": numpy.array([184,", "self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify", "(competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3_dist = { \"name\": \"sample compete", "{ \"stock cost\": { \"residential\": { \"2009\": 100, \"2010\": 100}, \"commercial\": { \"2009\":", "{ \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0.87, \"2010\": 0.87}}}, \"energy\": {", "\"2009\": 0, \"2010\": numpy.array([12, 10, 6])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5,", "Analysis engine object incorporating all 'measures_all' objects. measures_all_dist (list): List including competing/interacting sample", "metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self): \"\"\"Test output given residential measure with array", "5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "30}, \"measure\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}}, \"competed\": { \"all\": {\"2009\":", "\"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "10.55592, \"2010\": 10.55592}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 22.22366, \"2010\":", "handyvars (object): Useful variables across the class. 
measure_list (list): List for Engine including", "{ \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\": { \"2009\": None, \"2010\": None}},", "('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'cooling', 'demand', 'lighting gain', 'existing')) cls.secnd_adj_key = str(('AIA_CZ1',", "\"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0, \"2010\": 5}}}, \"energy\": { \"total\":", "= [cls.measures_all[1]] # Instantiate engine object based on above measures cls.a_run = run.Engine(cls.handyvars,", "11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}}, \"carbon\": { \"savings", "created below as a # substitute in the dict that has missing content;", "6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"cost\": { \"stock\": { \"total\":", "10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}}, \"energy\": { \"total\": { \"baseline\":", "'existing')) cls.overlap_key_scnd = str( ('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'cooling', 'demand', 'lighting gain',", "substitute in the dict that has missing content; this # value is given", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 16.04455, \"2010\": 16.04455}, \"efficient\": {\"2009\":", "1.29884336}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "\"original energy (total captured)\": {}, \"original energy (competed and captured)\": {}, \"adjusted energy", "\"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\":", "\"2010\": 10}, \"measure\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"energy\": { \"total\":", "\"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"carbon\": { \"total\":", "OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test 
operation of 'out_break_walk' function. Verify that function properly applies a", "\"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "numpy.array([1.73, 0.02, 9.60]), \"2010\": numpy.array([1.73, 0.02, 9.60])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\":", "Measure objects and associated contributing microsegment keys that overlap with 'measures_supply' Measure objects.", "1, 2])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\":", "3.648926, 3.737086, 3.956335, 3.180956, 2.886001]), \"2010\": numpy.array([ 2.425032, 2.584709, 2.240438, 2.298386, 2.147181])}, \"irr", "{ \"2009\": numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08,", "\"sub-market scaling\": 1}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\":", "(string): Second sample string for competed demand-side and supply-side market microsegment key chain", "0.1, 0.1, 0.1, 0.1, 0.4]}}}, \"secondary mseg adjustments\": { \"market share\": { \"original", "\"2010\": 20}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\":", "numpy.pmt(1.0, 2, -0.125), \"rate 3\": numpy.pmt(0.45, 2, 0.01724138), \"rate 4\": numpy.pmt(0.25, 2, 0.1),", ".45}}}} cls.ok_out = { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": 10, \"2010\":", "{ \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array( [0, 1, 2]), \"2010\":", "{\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}}, \"energy\": { \"total\": {", "measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify test measure consumer-level metrics", "run.UsefulVars(base_dir, run.UsefulInputFiles()) 
cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.test_adopt_scheme = \"Max adoption", "{ cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4 = { \"name\":", "0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])}, \"payback (w/ energy", "6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "-370}, \"2010\": { \"rate 1\": -435, \"rate 2\": -440, \"rate 3\": -145, \"rate", "{\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775, \"2010\": 31.66775}}, \"competed\": { \"baseline\": {\"2009\":", "#!/usr/bin/env python3 \"\"\" Tests for running the engine \"\"\" # Import code to", "50, \"2010\": 50}, \"cost savings (total)\": {\"2009\": 5, \"2010\": 15}, \"cost savings (annual)\":", "\"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": [\"general service (LED)\"]}, \"markets\":", "-8.232209e-08, -9.117156e-08, -8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07, -9.855809e-08])}}, {", "(for input uncertainty test cases) elif isinstance(i, numpy.ndarray): self.assertTrue(type(i) == type(i2)) for x", "{ \"Heating\": {\"2009\": .10, \"2010\": .10}, \"Cooling\": {\"2009\": .15, \"2010\": .15}}, \"Commercial\": {", "\"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\",", "16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6, 6.5, 8])}}, \"competed\": { \"baseline\": {", "\"2010\": numpy.array([8.89, 5.11, 9.99])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300,", "-0.04935749, -0.04935749, -0.0802776]), \"2010\": numpy.array([ -0.021500000, 
-0.021500000, -0.08611353, -0.08611353, -0.1247637])}, \"ccc\": { \"2009\":", "{ \"total\": { \"baseline\": { \"2009\": 51, \"2010\": numpy.array([36, 39, 48])}, \"efficient\": {", "test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test output given residential", "50.0, 48.9]), \"2010\": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])}, \"cost savings (total)\": { \"2009\":", "{\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": {\"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": {\"2009\":", "6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 36}, \"efficient\": {\"2009\": 0,", "cls.ok_master_mseg_dist2 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\":", "following competition and supply-demand overlap adjustments. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for", "numpy.array([ 13.02227, 13.64868, 10.14500])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([", "\"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 51,", "in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist = [run.Measure( cls.handyvars, **x) for x in", "0, \"2010\": numpy.array([18, 15, 9])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([12,", "{\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy", "and carbon costs)\": {\"2009\": numpy.array([ 0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.1133333,", "-110, \"rate 6\": -115, \"rate 7\": -120}}}}] # Adjust/finalize point value test measure", 
"supply-side lighting measure 2. compete_meas3 (dict): Sample commercial supply-side lighting measure 3. compete_meas_dist", "40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}},", "numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\":", "market/savings value. Attributes: a_run (object): Sample analysis engine object. ok_total (dict): Sample unpartitioned", "105.1, 105, 106.1])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\":", "and updates master microsegments for a series of competing residential measures; and that", "class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list", "dicts. Args: dict1 (dict): First dictionary to be compared dict2 (dict): Second dictionary", "self.ok_savings_mkts_comp_schemes) # Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify test measure results update", "setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars", "\"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing", "2, 0.3), \"rate 5\": numpy.pmt(0.15, 2, 0.3695652), \"rate 6\": numpy.pmt(0.065, 2, 0.4389671), \"rate", "\"commercial\": { \"2009\": { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95,", "numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 5, 4.100197)]), \"2010\":", "Verify that the function converts terminal/leaf node lists in a dict to numpy", "7\": -400}, \"2010\": { \"rate 1\": -350, \"rate 2\": -60, \"rate 3\": -70,", "identical size, # zip_longest() will use the fill value 
created below as a", "self.assertAlmostEqual(function_output[ind], x, places=2) else: self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase): \"\"\"Test the operation of the", "120, \"rate 4\": 130, \"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160}}},", "\"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 20,", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([17.77, 10.23, 19.98]), \"2010\":", "parameters\": { cls.overlap_key: { \"rate distribution\": { \"2009\": [ 0.1, 0.1, 0.1, 0.1,", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([16, 15, 13])}},", "0, \"2010\": 16}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0,", "operation of the 'payback' function. Verify cashflow input generates expected payback output. Attributes:", "'demand', 'windows', 'existing')) cls.adjust_key2 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',", "{}, \"adjusted energy (competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key1:", "\"rate 7\": -75}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "{ \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 30, \"2010\": 30}}, \"competed\": {", "100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}, \"2010\": { \"rate", "methods for use in all tests below.\"\"\" def dict_check(self, dict1, dict2): \"\"\"Check the", "\"efficient\": { \"2009\": numpy.array( [20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\":", "1.925539), numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07, 2, 1.582016)]) }, \"commercial\": {", "\"2010\": 0.865895571}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "31, 6, 51]), \"2010\": numpy.array([106, 95, 81, 11, 124])}}, 
\"competed\": { \"baseline\": {\"2009\":", "Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist4'), the", "{ \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 10}},", "zip([ tested_data[\"key 1\"][\"nested key 1\"], tested_data[\"key 1\"][\"nested key 2\"], tested_data[\"key 2\"]], [numpy.ndarray, int,", "routine on sample supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) # Remove any market", "\"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"F32T8\"],", "with a residential sample measure. ok_out_dist3 (dict): Measure attribute update status, savings, and", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.5, \"2010\": 6}}}, \"energy\":", "\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'multi", "cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist", "numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\":", "0.62, 1.59, 2, 0.67, 0.005, -0.13, 7.7e-10, -9.2e-9] def test_metric_updates(self): \"\"\"Test for correct", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341, \"2010\":", "\"rate 7\": -75}, \"2010\": { \"rate 1\": -40, \"rate 2\": -50, \"rate 3\":", "25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist1 = {", "{ \"contributing mseg keys and values\": { cls.adjust_key1: { \"stock\": { \"total\": {", "\"efficient\": 
{\"2009\": 34.5, \"2010\": 33}}, \"competed\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\":", "\"2010\": numpy.array([ 4.882353, 7.108108, 6.327488, 10.343948, 8.181351])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([", "self.ok_out_dist1[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify test measure", "2, 1.247533), numpy.pmt(0.07, 2, 1.130011)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None,", "\"2010\": numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07, -9.855809e-08])}}, { \"anpv\": { \"stock cost\": {", "{ \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07, 1,", "0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test output given commercial measure with point value inputs.\"\"\"", "[\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\":", "0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"carbon\": { \"total\": { \"baseline\":", "c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\",", "has different key names self.assertEqual(k, k2) # If the recursion has not yet", "numpy.array([24, 20, 12])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}}, \"competed\": {", "d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation", "{ \"2009\": numpy.array( 
[15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2,", "\"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 5, \"2010\": 5}}}, \"energy\":", "{\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.02, 8.65, 5.14]), \"2010\": numpy.array([8.02, 8.65,", "{ \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"cost\": {", "{ \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11,", "20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10,", "{\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": {", "15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5,", "self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self): \"\"\"Test output given residential measure with array inputs.\"\"\"", "10}, \"measure\": {\"2009\": 8.02, \"2010\": 8.02}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455,", "\"2010\": numpy.array([ -0.1140346, -0.11474490, -0.09371098, -0.072742925, -0.11206083])}, \"ccc\": { \"2009\": numpy.array([ -1.608851e-08, -1.689124e-08,", "None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": \"supply\"}, \"technology\":", "the fill value created below as a # substitute in the dict that", "numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07,", "{ \"name\": \"sample compete measure r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\":", "15}}}, { \"cce\": { \"2009\": numpy.array([ -0.01565543, 
-0.02450490, -0.01934271, -0.01897398, -0.01418052]), \"2010\": numpy.array([", "29.98073])}, \"efficient\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}},", "\"baseline\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}, \"efficient\": { \"2009\": 0, \"2010\":", "{\"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.9040091),", "{\"2009\": 17.77300, \"2010\": 17.77300}, \"efficient\": {\"2009\": 8.886499, \"2010\": 8.886499}}, \"competed\": { \"baseline\": {\"2009\":", "\"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}}, \"lifetime\":", "functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure cls.measure_list =", "\"contributing mseg keys and values\": { cls.adjust_key1: { \"stock\": { \"total\": { \"all\":", "{\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\":", "\"mseg_out_break\": {}}}} cls.compete_meas2_dist = { \"name\": \"sample compete measure c2 dist\", \"climate_zone\": [\"AIA_CZ1\"],", "point value test measure consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] =", "should be generated given valid sample inputs. 
ok_out_array (list): Other financial metric values", "# function function_output = engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, self.ok_csave,", "0.2009346)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"energy cost\":", "\"baseline\": {\"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"cost\": { \"stock\":", "10.8}, \"Max adoption potential\": { \"key 1\": { \"nested key 1\": [0.5, 0.2,", "the case of a dict, the first item # in the tuple is", "[\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"supply\",", "dicts from the current keys are equal self.assertCountEqual(i, i2) # Continue to recursively", "\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": {", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\":", "in cls.handyvars.aeo_years}, \"total affected\": { yr: 5 for yr in cls.handyvars.aeo_years}, \"affected savings\":", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 20, \"2010\":", "0, \"2010\": 24}, \"efficient\": {\"2009\": 0, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 0,", "numpy.array([ 0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]), \"2010\": numpy.array([ 1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])},", "sample measure. 
ok_out_dist3 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics", "yr in cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for yr in cls.handyvars.aeo_years}}, },", "1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr", "List of supply-side Measure objects and associated contributing microsegment keys that overlap with", "\"efficient\": { \"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\":", "{\"2009\": 40, \"2010\": 40}, \"Cooling\": {\"2009\": 45, \"2010\": 45}}}} def test_ok(self): \"\"\"Test for", "\"efficient\": { \"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\":", "{ \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}}, \"lifetime\": {\"baseline\":", "1 including lists of energy/carbon and associated cost input values instead of point", "6, 2.38327), numpy.pmt(0.07, 6, 4.76654), None, None, None, 0.62, 1.59, 2, 0.67, 0.005,", "and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True, \"competed\": True}, \"Max adoption", "[\"electricity (grid)\"]}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\",", "costs)\": {\"2009\": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]), \"2010\": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])},", "0.1, 0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}}, \"secondary", "yet reached the terminal/leaf node if isinstance(i, dict): # Test that the dicts", "\"2009\": numpy.array([ 0.002333333, 0.002333333, -0.04935749, -0.04935749, -0.0802776]), \"2010\": numpy.array([ -0.021500000, -0.021500000, -0.08611353, -0.08611353,", "\"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}}} cls.compete_meas3 =", 
"fillvalue=fill_val): # Confirm that at the current location in the dict structure, #", "\"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 8}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "self.ok_master_mseg_dist2 # Create Engine instance using test measure, run function on it engine_instance", "packages import unittest import numpy import copy import itertools import os class CommonTestMeasures(object):", "100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}}}, \"energy cost\": {", "\"rate 7\": -400}, \"2010\": { \"rate 1\": -350, \"rate 2\": -60, \"rate 3\":", "6.5, 8.0])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 17, \"2010\":", "yr: 10 for yr in cls.handyvars.aeo_years}, \"total affected\": { yr: 5 for yr", "33}}, \"competed\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}},", "10}, \"Cooling\": {\"2009\": 15, \"2010\": 15}}, \"Commercial\": { \"Heating\": {\"2009\": 20, \"2010\": 20},", "numpy.pmt(0.07, 2, 0.2009346)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }},", "(total)\": { \"2009\": numpy.array([184, 173, 169, 194, 149]), \"2010\": numpy.array([194, 205, 219, 289,", "all class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure", "metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist = [run.Measure( cls.handyvars,", "1, \"2010\": 1}, \"measure\": 1}}] def test_compete_com(self): \"\"\"Test outcomes given sample measures w/", "{\"2009\": numpy.array([ 0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]), \"2010\": numpy.array([ 1.9411765, 3.054054, 3.931585, 6.612039,", "assign it a sample 'uncompeted' # market ('ok_master_mseg_point'), the focus of this test", "120}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": 
{ \"2009\":", "5, \"2010\": 10}, \"measure\": {\"2009\": 5, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\":", "scaling\": 1}, cls.overlap_key_scnd: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10},", "2, 2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07, 2, 1.654337),", "Alternative version of sample residential supply-side cooling measure 1 including lists of stock", "main(): \"\"\"Trigger default behavior of running all test fixtures in the file.\"\"\" unittest.main()", "\"2010\": numpy.array([ 21.11183, 21.34227, 20.05334])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\":", "numpy.array([12, 13, 16])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\":", "27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975, \"2010\": 20.82975}}, \"competed\": { \"baseline\": {\"2009\": 13.88650,", "home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity", "\"2010\": numpy.array([ -0.02466428, -0.02853592, -0.02023954, -0.02715319, -0.02355809])}, \"cce (w/ carbon cost benefits)\": {", "\"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\": {}, \"original", "produce tuples for the items # identified, where in the case of a", "in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary' and 'secondary_adj'", "'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]}", "\"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95, \"rate 4\": 100, \"rate", "# market ('ok_master_mseg_dist2'), the focus of this test 
suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res)", "10 for yr in cls.handyvars.aeo_years}, \"total affected\": { yr: 5 for yr in", "\"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"carbon\": { \"total\":", "= self.ok_master_mseg_point # Create Engine instance using test measure, run function on it", "1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key2: {", "-0.05150073, -0.006204243, -0.09331291]), \"2010\": numpy.array([ -0.1140346, -0.11474490, -0.09371098, -0.072742925, -0.11206083])}, \"ccc\": { \"2009\":", "34.5, \"2010\": 33}}, \"competed\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5,", "class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'calc_savings_metrics' function. Verify that measure", "5}, \"efficient\": { \"2009\": numpy.array( [0, 1, 2]), \"2010\": numpy.array( [0, 1, 2])}}},", "\"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key2:", "30}, \"efficient\": {\"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15},", "\"2010\": 120}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": {", "5, \"2010\": numpy.array([8.0, 7.5, 6.5])}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 1.5, 2.6])}}},", "measure 3. 
measures_all (list): List of all competing/interacting sample Measure objects with point", "20, \"2010\": numpy.array([8, 9, 9.1])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": numpy.array([8.0,", "\"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, str(('primary',", "{}}} }, \"mseg_out_break\": {}}}} cls.compete_meas1_dist = { \"name\": \"sample compete measure r1 dist\",", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 20, \"2010\": 20}},", "0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": {\"2009\": 1.73179114,", "20.29000])}, \"efficient\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}},", "\"technology\": {\"primary\": [\"F32T8\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\":", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20,", "recursion has not yet reached the terminal/leaf node if isinstance(i, dict): # Test", "\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5,", "0.1400000, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.2040000, 0.10800000, 0.1640000,", "and updates master microsegments for a series of competing commercial measures; and that", "8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "measure consumer # metrics consumer_metrics = [{ \"stock cost\": { \"residential\": { \"2009\":", "ok_out_dist1 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that should", "0.33}, \"payback (w/ energy and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_point_com", "(competed and captured)\": {}, \"adjusted 
energy (total captured)\": {}, \"adjusted energy (competed and", "supply-side lighting measure 1. compete_meas2 (dict): Sample commercial supply-side lighting measure 2. compete_meas3", "\"2009\": 95, \"2010\": 95}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": {", "51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}}, \"cost\": { \"stock\": { \"total\":", "34, \"2010\": 24}, \"efficient\": {\"2009\": 25.5, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 17,", "to convert. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class", "-205, \"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200}}}, \"carbon cost\": {", "\"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\":", "\"energy\": { \"total\": { \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}, \"efficient\":", "engine object incorporating all 'measures_primary_dist' objects. 
measures_overlap (dict): List of supply-side Measure objects", "0.22])}}] cls.ok_out_dist4 = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\":", "\"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\":", "2, 0.3695652), \"rate 6\": numpy.pmt(0.065, 2, 0.4389671), \"rate 7\": -0.25}, \"2010\": { \"rate", "sample residential demand-side cooling measure 1 including lists of energy/carbon and associated cost", "\"competed\": { \"baseline\": {\"2009\": 13.02227, \"2010\": 13.02227}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"carbon\":", "\"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0,", "{ \"2009\": 0, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\":", "key 1\": [1, 2, 3, 4, 5], \"nested key 2\": 5}, \"key 2\":", "{ \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])},", "sample measure # following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check(", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -350, \"rate 2\": -60, \"rate", "\"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": {", "22])}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5,", "\"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"cost\":", "5, 54])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {", "0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20},", "None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"resistance 
heat\", \"ASHP\", \"GSHP\", \"room", "numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07,", "savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist3' with a", "\"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "carbon cost benefits)\": { \"2009\": numpy.array([ 0.002333333, 0.002333333, -0.04935749, -0.04935749, -0.0802776]), \"2010\": numpy.array([", "(int): Sample measure avoided carbon costs. ok_out_dicts (list): Output annuity equivalent Net Present", "{ \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice", "and carbon costs)\": {\"2009\": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]), \"2010\": numpy.array([2.00, 2.00, 4.09,", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300},", "\"rate 5\": numpy.pmt(0.15, 2, 1.625709), \"rate 6\": numpy.pmt(0.065, 2, 1.820626), \"rate 7\": -1},", "\"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [0,", "{\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\":", "expected payback output. Attributes: handyvars (object): Useful variables across the class. 
measure_list (list):", "{\"2009\": 1.113501, \"2010\": 1.113501}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\":", "19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "1, 0.9345794), numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 5,", "0.67, 0.005, -0.13, 7.7e-10, -9.2e-9] def test_metric_updates(self): \"\"\"Test for correct outputs given valid", "0, \"2010\": numpy.array([6, 5, 3])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 0,", "Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify test measure portfolio-level financial", "(object): Useful variables across the class. test_adopt_scheme (string): Sample consumer adoption scheme. overlap_key", "residential measure with array inputs.\"\"\" # Initialize test measure and assign it a", "'compete_res_primary,' and 'htcl_adj'. 
Verify that 'compete_res_primary' correctly calculates primary market shares and updates", "\"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_out_point_res = [{ \"savings and portfolio metrics\":", "11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}, \"efficient\": { \"2009\": numpy.array([0, 0,", "= os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = { \"market_entry_year\": None, \"market_exit_year\": None,", "{\"2009\": 26.04455, \"2010\": 26.04455}}, \"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\":", "30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"carbon\": { \"total\": { \"baseline\":", "\"ccc\": {\"2009\": -1.602415e-08, \"2010\": -1.111353e-08}, \"ccc (w/ energy cost benefits)\": { \"2009\": -8.269082e-08,", "cls.adjust_key2: { \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\":", "\"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(cls.handyvars, **x) for", "41.65950, 30.34466, 44.97110]), \"2010\": numpy.array([ 41.65950, 30.34466, 44.97110])}, \"efficient\": { \"2009\": numpy.array([ 27.77300,", "4\": 80, \"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110}, \"2010\": {", "\"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 22.22, \"2010\": 22.22}}, \"competed\":", "0, \"2010\": numpy.array([8.0, 7.5, 6.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0,", "None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": { \"Technical potential\": {", "2, -0.4090909), \"rate 2\": numpy.pmt(1.0, 2, 0), \"rate 3\": numpy.pmt(0.45, 2, 0.1896552), \"rate", "{ \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]),", "update status, savings, and portfolio/consumer-level financial metrics 
that should be generated given 'ok_master_mseg_dist3'", "1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure4 =", "the operation of the 'payback' function. Verify cashflow input generates expected payback output.", "\"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07, 2, 2.043061),", "15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5,", "(annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": { \"2009\": numpy.array([184, 173,", "self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())),", "'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist)", "25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "Sample dict with supply-demand overlap data. 
adjust_key1 (string): First sample string for competed", "\"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"],", "costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_dist1 = [{ \"savings and portfolio metrics\":", "-0.09523954, -0.10215319, -0.09855809])}, \"ccc\": { \"2009\": numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\":", "30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\":", "0.4459346), numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 2, 0.4259346)])},", "Measure object in 'measures_all_dist' following competition and supply-demand overlap adjustments. \"\"\" @classmethod def", "{ \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 15, \"2010\": 15}}}, \"energy\": {", "each set of sample cash flows. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for", "(string): Second sample string for secondary market microsegment key chain being tested. secnd_adj_key", "\"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "for competed demand-side and supply-side market microsegment key chain being tested. 
compete_meas1 (dict):", "\"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, cls.overlap_key_scnd:", "measure competition routine on sample measures self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) # Run secondary", "numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07, 2, 2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2,", "-0.05525120])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.003046667, -0.01407333, -0.05267604, -0.05230731,", "\"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"energy cost\": { \"residential\":", "run.UsefulInputFiles()) cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.test_adopt_scheme = \"Max adoption potential\"", "{\"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\":", "self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) #", "home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\": { \"total\": { \"all\": {\"2009\":", "\"efficient\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}}, \"competed\": { \"baseline\": { \"2009\":", "\"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist4' with a residential", "\"efficient\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}}, \"competed\":", "\"residential\": { \"2009\": -150, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, {", "{\"2009\": 13.02227, \"2010\": 13.02227}, \"efficient\": {\"2009\": 6.511136, 
\"2010\": 6.511136}}}, \"carbon\": { \"total\": {", "associated contributing microsegment keys that overlap with 'measures_supply' Measure objects. a_run (object): Analysis", "15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}}, \"lifetime\": {\"baseline\":", "{ \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 1.29884336,", "numpy.array([ 8.886499, 5.114887, 9.990366])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]),", "60}, \"efficient\": {\"2009\": 45, \"2010\": 45}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30},", "measure_instance = run.Measure(self.handyvars, **self.sample_measure) # Test for correct data types in measure markets", "\"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975, \"2010\": 20.82975}}, \"competed\": { \"baseline\":", "6.5, 0, 999] def test_cashflow_paybacks(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" #", "\"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "\"energy\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}, \"efficient\":", "\"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True, \"competed\": True}, \"Max", "0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]),", "\"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "10, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 10, \"2010\": 15}}, \"carbon\": { \"savings", "\"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.11, \"2010\": 11.11}}}, \"energy\":", "63.33550}, \"efficient\": {\"2009\": 42.22366, \"2010\": 42.22366}}, \"competed\": { \"baseline\": 
{\"2009\": 31.66775, \"2010\": 31.66775},", "and captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\":", "18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist =", "4\": 110, \"rate 5\": 115, \"rate 6\": 120, \"rate 7\": 125}, { \"rate", "measure 1 including lists of energy/carbon and associated cost input values instead of", "7\": -120}}}}] # Adjust/finalize point value test measure consumer metrics for ind, m", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 20, \"2010\": 20}}, \"competed\": {", "numpy.array([ 2.59768671, 0.02713253, 14.40498233])}, \"efficient\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([", "30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07,", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 16.04455, \"2010\": 16.04455}, \"efficient\": {\"2009\": 8.022273,", "\"all\": { \"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": 23, \"2010\": numpy.array([22, 22,", "\"2010\": numpy.array([ 1.670251, 7.32767, 0.01445051])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\":", "{ \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\":", "inputs. 
ok_out_array (list): Other financial metric values that should be generated given valid", "20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10,", "0.2233333, 0.14, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.34, 0.1800000,", "7])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": {", "50, \"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 50,", "value inputs. measures_secondary (list): Subset of 'measures_all' with secondary microsegments to adjust. a_run", "Second sample string for competed demand-side and supply-side market microsegment key chain being", "sample string for secondary market microsegment key chain being tested. secnd_adj_key (string): Key", "'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07, 2, 1.139051),", "numpy.array([ -0.0396936, -0.04452961, -0.05150073, -0.006204243, -0.09331291]), \"2010\": numpy.array([ -0.1140346, -0.11474490, -0.09371098, -0.072742925, -0.11206083])},", "10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5,", "{\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array( [20,", "2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "\"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": .30, \"2010\": .30}, \"Cooling\": {\"2009\": .35,", "market ('ok_master_mseg_dist1'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][", "25, \"2010\": 25}}, 
\"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25,", "\"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.036380, 0.019260, -0.01934271, -0.01897398, -0.04613129]), \"2010\":", "\"residential\": { \"2009\": numpy.array([95, 100, 90]), \"2010\": numpy.array([95, 100, 90])}, \"commercial\": { \"2009\":", "numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "these primary market microsegments. Attributes: handyvars (object): Useful variables across the class. test_adopt_scheme", "{ \"2009\": numpy.array([ 3.340502, 14.65534, 0.02890102]), \"2010\": numpy.array([ 3.340502, 14.65534, 0.02890102])}, \"efficient\": {", "0, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\":", "{ \"savings\": { cls.adjust_key1: { \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key1: {", "inputs.\"\"\" # Run the measure competition routine on sample demand-side measures self.a_run.compete_res_primary( self.measures_demand,", "captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.compete_meas5 = {", "-1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc (w/ energy", "34, \"2010\": numpy.array([24, 26, 32])}}, \"competed\": { \"baseline\": { \"2009\": 25.5, \"2010\": numpy.array([18.0,", "\"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"],", "6, 7])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing", "\"2009\": numpy.array([184, 173, 169, 194, 149]), \"2010\": numpy.array([194, 205, 219, 289, 176])}, \"savings", "{ \"anpv\": { \"stock cost\": { \"residential\": {\"2009\": None, \"2010\": None}, 
\"commercial\": {", "2.227001, \"2010\": 2.227001}}, \"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503,", "{ \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}},", "terminal leaf nodes. ok_master_mseg_dist1 (dict): Sample measure master microsegment including energy, carbon, and", "(int): Sample measure energy savings. ok_ecostsave (int): Sample measure energy cost savings. ok_csave", "{\"2009\": 6.511136, \"2010\": 6.511136}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 16.04455,", "{\"2009\": 19.53341, \"2010\": 19.53341}}, \"competed\": { \"baseline\": {\"2009\": 13.02227, \"2010\": 13.02227}, \"efficient\": {\"2009\":", "\"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": {", "2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "List including competing/interacting sample Measure objects with array inputs. measures_demand_dist (list): Demand-side subset", "adoption scheme. ok_rate (float): Sample discount rate. 
ok_master_mseg_point (dict): Sample measure master microsegment", "\"key 1\": { \"nested key 1\": [0.5, 0.2, 0.3, 0.4, 0.5], \"nested key", "10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self):", "\"2009\": numpy.array([0, 1, 2]), \"2010\": numpy.array([0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\":", "{\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array( [15.1, 12.7, 14.1, 14.2, 15.5]),", "numpy.array([ 4.601286, 4.897553, 4.260683, 4.367373, 4.089454])}, \"payback (w/ energy costs)\": { \"2009\": numpy.array([", "numpy.array([ 2.227001, 10.25874, 0.02119408]), \"2010\": numpy.array([ 2.227001, 10.25874, 0.02119408])}}, \"competed\": { \"baseline\": {", ".20}, \"Cooling\": {\"2009\": .25, \"2010\": .25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\":", "numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\":", "{\"2009\": 6.943250, \"2010\": 6.943250}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17.77300,", "update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1])", "\"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 1, 0.4672897),", "7\": 160}, \"2010\": { \"rate 1\": 100, \"rate 2\": 110, \"rate 3\": 120,", "-1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}}, { \"cce\": { \"2009\": numpy.array([", "'uncompeted' # market ('ok_master_mseg_point'), the focus of this test suite test_meas = run.Measure(self.handyvars,", "\"2010\": 
numpy.array([1.11, 4.89, 0.01])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001,", "2, 0.5159346), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\": {", "\"2010\": numpy.array([ 63.33550, 64.02682, 60.16002])}, \"efficient\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\":", "(w/ energy and carbon costs)\": { \"2009\": numpy.array([ 4.713113, 4.884221, 5.309580, 2.908860, 5.394281]),", "{ \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 50, \"2010\": 50},", "\"2010\": .40}, \"Cooling\": {\"2009\": .45, \"2010\": .45}}}} cls.ok_out = { \"AIA CZ1\": {", "numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07,", "\"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0,", "captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure3 = { \"name\": \"sample measure 3 (commercial)\",", "\"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\":", "{\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\":", "= { \"name\": \"sample measure 5 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None,", "Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[2]) # Verify test measure", "= CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict = measure_instance.__dict__ def test_attributes(self): \"\"\"Compare object", "cls.ok_master_mseg_point = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 
20}, \"measure\":", "class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test operation of 'out_break_walk' function. Verify that function properly applies", "\"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\":", "5.057443, 7.495183])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 17.77300,", "sample measures w/ some array inputs.\"\"\" # Run measure competition routine on sample", "{\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0, \"2010\": 5}}}, \"energy\": { \"total\": {", "40}, \"efficient\": {\"2009\": 40, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20},", "5}, \"measure\": {\"2009\": 0, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20,", "40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}, \"efficient\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]),", "{ \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array( [15.1, 12.7, 14.1,", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 17, \"2010\": numpy.array([12,", "{ \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}},", "numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07,", "numpy.pmt(0.07, 5, 2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07, 2,", "\"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full service\",", "metrics that should be generated given 'ok_master_mseg_dist3' with a residential sample measure. ok_out_dist4", "sample residential measure. ok_cashflows (list): Set of sample input cash flows. 
ok_out (list):", "heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\":", "run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars, **sample_measure)] cls.a_run = run.Engine(handyvars, measure_list)", "0.2750000]), \"2010\": numpy.array([ 0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])}, \"payback (w/ energy and carbon", "7, 8, 10], [-100, 0, 1]] cls.ok_out = [5.14, 0.71, 6.5, 0, 999]", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}}, \"energy\": { \"total\":", "\"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\":", "{ \"total\": { \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}, \"efficient\": {", "(object): Useful variables across the class. sample_measure_res (object): Sample residential measure data. sample_measure_com", "0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\":", "\"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist1 = { \"stock\": { \"total\": { \"all\": {\"2009\":", "{\"2009\": .40, \"2010\": .40}, \"Cooling\": {\"2009\": .45, \"2010\": .45}}}} cls.ok_out = { \"AIA", "\"2009\": None, \"2010\": None }, \"commercial\": { \"2009\": None, \"2010\": numpy.array([ { \"rate", "{ \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}}, \"lifetime\": {\"baseline\":", "as a point value else: self.assertAlmostEqual(i, i2, places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure", "1.473535), numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07, 2, 1.130011)]) }, \"commercial\": {", "-0.2169622), numpy.pmt(0.07, 2, 2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07,", "6\": -150, \"rate 7\": -400}, 
\"2010\": { \"rate 1\": -350, \"rate 2\": -60,", "\"2009\": numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08,", "145, 96])}, \"cost savings (total)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\":", "numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 5, 2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1,", "\"contributing mseg keys and values\": { cls.adjust_key2: { \"stock\": { \"total\": { \"all\":", "20.47302, 15.21750])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.02227, 13.64868, 10.14500]), \"2010\": numpy.array([", "\"2010\": 26.04455}}, \"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\":", "0.20]), \"2010\": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}] cls.ok_out_dist4 = [{ \"savings and portfolio", "1\": -135, \"rate 2\": -140, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\":", "\"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -190, \"rate 2\": -195, \"rate", "100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure r2\",", "\"2010\": 100}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15},", "8.022273, \"2010\": 8.022273}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "0.2042254), \"rate 7\": -0.125}}}, \"energy cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\":", "\"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": {", "\"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 19.53341, 20.47302,", "self.measures_master_msegs_out[ind], 
self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test outcomes given valid sample measures w/ some", "0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "\"2010\": numpy.array([ 2.7285e-08, 1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc (w/ energy cost benefits)\": {", "numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\":", "1, 0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": {", "{\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"general service (CFL)\"], \"secondary\": None}, \"markets\": {", "{\"primary\": [\"F32T8\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": {", "\"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\":", "sample 'uncompeted' # market ('ok_master_mseg_point'), the focus of this test suite test_meas =", "numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07,", "\"energy\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\":", "\"cce\": {\"2009\": -0.01602415, \"2010\": -0.01111353}, \"cce (w/ carbon cost benefits)\": { \"2009\": -0.04935749,", "mseg keys and values\": { cls.adjust_key2: { \"stock\": { \"total\": { \"all\": {\"2009\":", "2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018)]), \"2010\": numpy.array([", "= str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1 = { \"name\": \"sample compete measure c1\", \"climate_zone\":", "run.Engine(self.handyvars, [test_meas]) 
engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # For first test case, verify correct adoption/competition", "30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 21.11183,", "\"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\":", "{ \"stock\": { \"total\": { \"all\": { \"2009\": 30, \"2010\": 30}, \"measure\": {", "\"2010\": numpy.array([ 0.027285, 0.019795, -0.02023954, -0.02715319, -0.05525120])}, \"cce (w/ carbon cost benefits)\": {", "45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "\"sample compete measure r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"],", "2\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full", "(w/ energy and carbon costs)\": { \"2009\": numpy.array([ 0.1937984, 0.1879699, 0.1748252, 0.2840909, 0.1724138]),", "import run # Import needed packages import unittest import numpy import copy import", "{ \"rate 1\": -350, \"rate 2\": -60, \"rate 3\": -70, \"rate 4\": -380,", "metrics that should be generated given 'ok_master_mseg_dist2' with a residential sample measure. 
ok_out_dist3", "\"payback (w/ energy and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_point_com =", "None, \"market_exit_year\": None, \"markets\": { \"Technical potential\": { \"key 1\": { \"nested key", "consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test output given residential measure with", "numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07, 2, 1.356014)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2,", "\"2010\": numpy.array([20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {", "0.2009346)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.pmt(0.07,", "{ cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": {", "\"measure\": { \"2009\": 0, \"2010\": numpy.array([8.0, 7.5, 6.5])}}}, \"energy\": { \"total\": { \"baseline\":", "m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for x in", "\"2010\": -10}, \"cost savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\":", "numpy.array([ 63.33550, 64.02682, 60.16002])}, \"efficient\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([", "\"\"\"Define objects/variables for use across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars =", "1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 30,", "{ \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 30, \"2010\": 20}},", "\"2010\": 1}, \"measure\": 1}}] def test_compete_res(self): \"\"\"Test outcomes given valid sample measures w/", "{ \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 15, \"2010\": 5}}}},", "numpy.array([ 2.7285e-08, 
1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\":", "numpy.array([6.0, 6.5, 8.0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24,", "given valid sample inputs. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across", "('ok_master_mseg_dist2'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"]", "(dict): Sample dict with supply-demand overlap data. adjust_key1 (string): First sample string for", "\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist3 = { \"stock\": { \"total\":", "\"Heating\": {\"2009\": 20, \"2010\": 20}, \"Cooling\": {\"2009\": 25, \"2010\": 25}}}, \"AIA CZ2\": {", "{ \"name\": \"sample measure 2\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None,", "\"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([16, 27, 31,", "= cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1 = { \"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1',", "0, \"2010\": 0}}, \"original energy (competed and captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\":", "\"rate 7\": 115}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "of the 'calc_savings_metrics' function. 
Verify that measure master microsegment inputs yield expected savings", "of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1 #", "value else: self.assertAlmostEqual(i, i2, places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure attributes are correctly", "(list): Subset of 'measures_all' with secondary microsegments to adjust. a_run (object): Analysis engine", "\"2009\": 46, \"2010\": numpy.array([44, 44, 42])}, \"efficient\": { \"2009\": 34.5, \"2010\": numpy.array([33, 33,", "{\"2009\": 25.5, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\":", "{ \"2009\": -0.04935749, \"2010\": -0.08611353}, \"ccc\": {\"2009\": -1.602415e-08, \"2010\": -1.111353e-08}, \"ccc (w/ energy", "\"primary\": \"supply\", \"secondary\": \"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"], \"markets\": { \"Technical", "\"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}, \"cost", "numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07,", "{ \"2009\": 5, \"2010\": numpy.array([ 0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\":", "1, -0.5), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 5, 2.887211)]), \"2010\": numpy.array([", "5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0,", "-1.302512e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1,", "key 2\": 2}, \"key 2\": 5.8}}} def test_numpy_convert(self): \"\"\"Test for correct function output", "numpy.array([ numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 
0.9040091),", "5, 4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2, 1.356014),", "if isinstance(i, dict): # Test that the dicts from the current keys are", "Sample consumer adoption scheme. ok_rate (float): Sample discount rate. ok_master_mseg_point (dict): Sample measure", "1.97074), numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07, 2, 1.356014)]),", "d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test outcomes given valid", "\"2009\": 10, \"2010\": numpy.array([0, 1.5, 2.6])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "7.5, 6.5])}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 1.5, 2.6])}}}, \"energy\": { \"total\":", "{ \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}}, \"competed\": {", "\"2010\": 300}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6,", "2.59768671, \"2010\": 2.59768671}, \"efficient\": { \"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": {", "self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test outcomes given valid sample measures w/", "99, 84, 99]), \"2010\": numpy.array([114, 105, 89, 145, 96])}, \"cost savings (total)\": {", "{\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\":", "\"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class 
OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test operation of 'out_break_walk' function. Verify", "and captured)\": {} }}}, \"mseg_out_break\": {}}}} class CommonMethods(object): \"\"\"Define common methods for use", "10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"cost\": { \"stock\": { \"total\":", "3\": numpy.pmt(0.45, 2, 0.01724138), \"rate 4\": numpy.pmt(0.25, 2, 0.1), \"rate 5\": numpy.pmt(0.15, 2,", "0.432947785, 0.004522088, 2.400830388])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "{}}}} cls.measures_all = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4),", "\"measure\": { \"2009\": numpy.array([2.23, 9.77, 0.02]), \"2010\": numpy.array([2.23, 9.77, 0.02])}}, \"competed\": { \"all\":", "lines below this point in all # test files) def main(): \"\"\"Trigger default", "cls.ok_ecostsave = 0.5 cls.ok_csave = 50 cls.ok_ccostsave = 1 cls.ok_out_array = [ numpy.pmt(0.07,", "{\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\":", "\\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x, y) for x, y in zip([ tested_data[\"key 1\"][\"nested key", "-1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.904701e-08, -9.630094e-08,", "{\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\":", "\"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test outcomes given valid sample measures w/ some array inputs.\"\"\"", "\"2009\": 15, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1},", "2, 0.375), \"rate 3\": numpy.pmt(0.45, 2, 0.5826397), \"rate 4\": numpy.pmt(0.25, 2, 0.72), \"rate", "{ \"2009\": 17, 
\"2010\": numpy.array([12, 13, 16])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\":", "test_metrics_ok_point_res(self): \"\"\"Test output given residential measure with point value inputs.\"\"\" # Initialize test", "numpy.array([16, 15, 13])}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([8, 9, 9.1])}}, \"competed\": {", "Run the measure competition routine on sample demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme)", "numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018)]), \"2010\":", "6\": 100, \"rate 7\": 110}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\":", "irr, payback, and # cost of conserved energy/carbon outputs for ind, x in", "ok_master_mseg_dist3 (dict): Sample measure master microsegment including measure lifetime array. ok_master_mseg_dist4 (dict): Sample", "10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}},", "0, \"2010\": 36}, \"efficient\": {\"2009\": 0, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 0,", "{ \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"cost\": {", "{ \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 20}}, \"competed\": {", "'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing')) cls.test_htcl_adj = { \"supply\":", "\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist1 = { \"stock\": { \"total\":", "numpy.array([ 13.02227, 13.64868, 10.14500]), \"2010\": numpy.array([ 13.02227, 13.64868, 10.14500])}, \"efficient\": { \"2009\": numpy.array([", "overlap_key (string): First sample string for competed primary market microsegment key chain being", "[\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": 
[\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\":", "{\"2009\": 39.06682, \"2010\": 39.06682}, \"efficient\": {\"2009\": 26.04455, \"2010\": 26.04455}}, \"competed\": { \"baseline\": {\"2009\":", "25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\":", "{ \"residential\": { \"2009\": -200, \"2010\": -200}, \"commercial\": { \"2009\": None, \"2010\": None}},", "2, 1.202332), numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07, 2, 1.130011)]) }, \"commercial\": { \"2009\": numpy.repeat(None,", "10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"carbon\": { \"total\": { \"baseline\":", "{ \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}, \"competed\": {", "array inputs.\"\"\" # Run measure competition routine on sample measures self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key,", "0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])},", "\"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 2.23, \"2010\": 2.23}}, \"competed\":", "30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array( [20, 21, 22]), \"2010\": numpy.array( [20,", "{ \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\":", "15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6,", "{}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} class CommonMethods(object): \"\"\"Define", "items # identified, where in the case of a dict, the first item", "# market ('ok_master_mseg_point'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res)", "cls.ok_life_ratio = 2 cls.ok_base_scost = 1 cls.ok_meas_sdelt = -1 cls.ok_esave = 7.5 
cls.ok_ecostsave", "45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"cost\": { \"stock\": { \"total\":", "\"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3,", "tested_data[\"key 2\"]], [numpy.ndarray, int, float])])) # Offer external code execution (include all lines", "(int): Sample measure->baseline lifetime ratio. ok_base_scost (int): Sample baseline stock cost. ok_scostsave (int):", "\"2010\": 20}, \"measure\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\":", "5, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": {", "(dict): Sample commercial measure #1. \"\"\" def __init__(self): self.sample_measure = { \"name\": \"sample", "\"efficient\": { \"2009\": numpy.array([ 1.670251, 7.816181, 0.01637724]), \"2010\": numpy.array([ 1.670251, 7.816181, 0.01637724])}}, \"competed\":", "Useful variables across the class. measure_list (list): List for Engine including one sample", "\"irr (w/ energy and carbon costs)\": { \"2009\": 4.54, \"2010\": 4.09}, \"payback (w/", "stock cost delta. ok_esave (int): Sample measure energy savings. 
ok_ecostsave (int): Sample measure", "\"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\",", "None}}}, { \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\": {", "106.1])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\":", "behavior of running all test fixtures in the file.\"\"\" unittest.main() if __name__ ==", "numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30,", "-8.216772e-08, -7.592937e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07,", "3.45, 3.45, 4.00]), \"2010\": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])}, \"irr (w/ energy and", "\"2010\": numpy.array([ 13.88650, 10.11489, 14.99037])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\":", "m.consumer_metrics['anpv'] = consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20,", "numpy.array([20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15,", "{ \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([1.11, 4.89, 0.01]), \"2010\":", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07,", "Sample measure energy savings. ok_ecostsave (int): Sample measure energy cost savings. 
ok_csave (int):", "following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"])", "copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1 = {", "{\"2009\": 42.22366, \"2010\": 42.22366}}, \"competed\": { \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\":", "{ \"stock cost\": { \"residential\": { \"2009\": 120, \"2010\": 120}, \"commercial\": { \"2009\":", "\"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"cost\": { \"stock\":", "case where the dicts are not of identical size, # zip_longest() will use", "os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict", "{\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\": 34.5, \"2010\": 33}}, \"competed\": { \"baseline\": {\"2009\":", "16.04455, \"2010\": 16.04455}, \"efficient\": {\"2009\": 8.022273, \"2010\": 8.022273}}, \"competed\": { \"baseline\": {\"2009\": 8.022273,", "dict with supply-demand overlap data. 
adjust_key1 (string): First sample string for competed demand-side", "\"Commercial\": { \"Heating\": {\"2009\": 20, \"2010\": 20}, \"Cooling\": {\"2009\": 25, \"2010\": 25}}}, \"AIA", "{ \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 22.22, \"2010\": 22.22}}, \"competed\": {", "\"energy\": { \"total\": { \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}, \"efficient\":", "# Set information needed to finalize point value test measure # consumer metrics", "{}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}} },", "numpy.array([ numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07, 2, 1.699537),", "0, \"2010\": numpy.array([24, 20, 12])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([18,", "\"2010\": 15}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1,", "0.1, 0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}, cls.overlap_key_scnd:", "1.808018), numpy.pmt(0.07, 2, 1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "{\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "operation of 'out_break_walk' function. Verify that function properly applies a climate zone/building type/end", "cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed", "12])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}, \"efficient\": {", "of point values. 
measures_all (list): List of all competing measures with point value", "\"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11, 11, 10.5])}}, \"competed\":", "(dict): List of demand-side Measure objects and associated contributing microsegment keys that overlap", "(dict): Sample residential demand-side cooling measure 1. compete_meas1_dist (dict): Alternative version of sample", "exploration of dict1 and dict2, respectively for (k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()),", "self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd,", "'metrics_update' function. Verify that cashflow inputs generate expected prioritization metric outputs. Attributes: handyvars", "2.23}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 1.11, \"2010\": 1.11}}},", "\"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}}, \"competed\": { \"baseline\":", "16.3, 13.3, 13.8, 12.5])}}, \"carbon\": { \"savings (total)\": { \"2009\": numpy.array([149.4, 142.3, 141.9,", "\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'single", "in all # test files) def main(): \"\"\"Trigger default behavior of running all", "\"2010\": 8.022273}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "\"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 23, \"2010\": 22}}, \"competed\":", "1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'single family home',", "competing/interacting sample Measure objects with array inputs. 
measures_demand_dist (list): Demand-side subset of 'measures_all_dist'.", "10, \"2010\": 10}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": {", "21.2, 22.5])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {", "25, \"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist3", "21.34227, 20.05334])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114,", "consumer metrics consumer_metrics_final = [{ \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\":", "from input dict.\"\"\" for key in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods):", "being tested. secnd_adj_key (string): Key used to link primary and secondary market microsegments", "15}, \"measure\": {\"2009\": 15, \"2010\": 15}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60,", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 3.340502, 14.65534, 0.02890102]), \"2010\": numpy.array([ 3.340502, 14.65534,", "stock cost and measure lifetime array. 
ok_out_point_res (dict): Measure attribute update status, savings,", "-9.2e-9] def test_metric_updates(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" # Create an", "\"sample compete measure r2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"],", "{ \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}, \"efficient\": {", "(annual)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2,", "\"2009\": numpy.array( [5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\":", "30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "46, \"2010\": numpy.array([44, 44, 42])}}, \"competed\": { \"baseline\": { \"2009\": 34.5, \"2010\": numpy.array([33.0,", "1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {", "\"2010\": 0.22}}] cls.ok_out_point_com = [{ \"savings and portfolio metrics\": { \"Technical potential\": {", "\"2009\": -150, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\":", "3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc", "100, \"2010\": 100}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\":", "13.88650, \"2010\": 13.88650}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"carbon\": { \"total\": { \"baseline\":", "\"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}, cls.overlap_key_scnd: { \"rate distribution\":", "numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None,", "\"2010\": numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08, 
-8.216772e-08, -7.592937e-08])}}, { \"anpv\": { \"stock cost\": {", "{\"2009\": -1.602415e-08, \"2010\": -1.111353e-08}, \"ccc (w/ energy cost benefits)\": { \"2009\": -8.269082e-08, \"2010\":", "\"efficient\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\":", "10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.02, 8.65, 5.14]), \"2010\": numpy.array([8.02, 8.65, 5.14])}}},", "3])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([36, 30, 18])},", "\"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}},", "4.218185, 3.631559]), \"2010\": numpy.array([ 1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])}, \"irr (w/ energy and", "0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}}, \"secondary mseg adjustments\":", "ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test outcomes given", "measures w/ point value inputs.\"\"\" # Run the measure competition routine on sample", "residential sample measure. 
ok_out_dist4 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial", "30, \"2010\": 30}, \"measure\": {\"2009\": 22.22, \"2010\": 22.22}}, \"competed\": { \"all\": {\"2009\": 15,", "194.9, 195.0, 193.9])}, \"savings (annual)\": { \"2009\": numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]), \"2010\":", "self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2 = self.ok_out self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the", "\"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\": 0, \"2010\": 18}}, \"competed\":", "valid sample measures w/ some array inputs.\"\"\" # Run the measure competition routine", "50 cls.ok_ccostsave = 1 cls.ok_out_array = [ numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07, 6, 2.38327),", "\"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\":", "{ \"baseline\": {\"2009\": 11.5, \"2010\": 11}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "Useful variables across the class. test_adopt_scheme (string): Sample consumer adoption scheme. 
overlap_key (string):", "= { \"name\": \"sample compete measure r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"],", "cls.compete_meas1 = { \"name\": \"sample compete measure c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\":", "# Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify test measure portfolio-level", "numpy.array([6.0, 6.5, 8.0])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 17,", "sample measure # following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check(", "10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_res(self): \"\"\"Test outcomes", "{ \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist = { \"name\": \"sample compete", "{\"2009\": 3.340502, \"2010\": 3.340502}, \"efficient\": {\"2009\": 2.227001, \"2010\": 2.227001}}, \"competed\": { \"baseline\": {\"2009\":", "\"name\": \"sample compete measure c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"],", "0.4345794), numpy.pmt(0.07, 5, 2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07,", "[{ \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\": { \"2009\":", "{\"2009\": 10, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\":", "{ \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": numpy.array([22.22, 22.68,", "-0.01111353}, \"cce (w/ carbon cost benefits)\": { \"2009\": -0.04935749, \"2010\": -0.08611353}, \"ccc\": {\"2009\":", "1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])},", 
"7.495183])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 17.77300, 10.22977,", "adoption potential\": { \"key 1\": { \"nested key 1\": [0.5, 0.2, 0.3, 0.4,", "places=2) class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,' and 'htcl_adj'. Verify that 'compete_res_primary' correctly calculates", "with a residential sample measure. ok_out_point_com (dict): Measure attribute update status, savings, and", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091),", "23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}},", "unittest import numpy import copy import itertools import os class CommonTestMeasures(object): \"\"\"Class of", "1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4 = { \"stock\": { \"total\":", "the terminal/leaf node if isinstance(i, dict): # Test that the dicts from the", "numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654), None, None, None, 0.62,", "0.1, 0.1, 0.1, 0.4]}}, cls.overlap_key_scnd: { \"rate distribution\": {}}}, \"secondary mseg adjustments\": {", "-0.047715000, -0.05520500, -0.09523954, -0.10215319, -0.13025120])}, \"ccc\": { \"2009\": numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08,", "2.38327), numpy.pmt(0.07, 6, 4.76654), None, None, None, 0.62, 1.59, 2, 0.67, 0.005, -0.13,", "0.01356626, 7.20249116])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088,", "numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 3.340502,", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\": 50, \"2010\":", "numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]), 
\"2010\": numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])}, \"savings (annual)\":", "{\"primary\": [\"general service (CFL)\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {},", "numpy.array([ 3.370236, 6.877566, 4.335205, 4.218185, 3.081800]), \"2010\": numpy.array([ 5.345834, 7.580577, 3.931585, 6.612039, 4.915578])},", "'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information", "\"efficient\": {\"2009\": 20, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\":", "\"2010\": numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}}, { \"anpv\": { \"stock cost\": {", "\"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": numpy.array([11.11, 11.34, 10.05]),", "{ \"baseline\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])},", "('ok_master_mseg_dist3'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"]", "\"cooling\"]}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": \"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\":", "measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[", "\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -350, \"rate 2\":", "i2) # At the terminal/leaf node, formatted as a numpy array # (for", "2. compete_meas3 (dict): Sample residential supply-side cooling measure 1. 
compete_meas3_dist (dict): Alternative version", "20, \"2010\": 20}, \"measure\": {\"2009\": 17, \"2010\": 12}}, \"competed\": { \"all\": {\"2009\": 10,", "(w/ energy costs)\": {\"2009\": numpy.array([ 0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]), \"2010\": numpy.array([ 1.9411765,", "self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2])", "instead of point values. compete_meas2 (dict): Sample residential demand-side cooling measure 2. compete_meas3", "\"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": {", "2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.670251, 7.816181, 0.01637724]), \"2010\": numpy.array([ 1.670251,", "\"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": 15}}, \"competed\":", "= { \"name\": \"sample compete measure r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"],", "residential supply-side cooling measure 1 including lists of stock cost input values instead", "26.04455, \"2010\": 26.04455}}, \"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136,", "0, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 36}, \"efficient\":", "10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.89, 5.11, 9.99]), \"2010\": numpy.array([8.89, 5.11, 9.99])}}},", "\"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": {", "-75}, \"2010\": { \"rate 1\": -40, \"rate 2\": -50, \"rate 3\": -55, \"rate", "\"rate 6\": -70, \"rate 7\": -75}}}}, { \"stock cost\": { \"residential\": { \"2009\":", "20.29000]), \"2010\": numpy.array([ 
26.04455, 27.29736, 20.29000])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 19.53341,", "{ \"baseline\": { \"2009\": numpy.array([ 63.33550, 64.02682, 60.16002]), \"2010\": numpy.array([ 63.33550, 64.02682, 60.16002])},", "100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": {", "self.ok_out_dist1[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify", "cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist", "output given valid inputs.\"\"\" dict1 = self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2 = self.ok_out self.dict_check(dict1,", "3\": 70, \"rate 4\": 80, \"rate 5\": 90, \"rate 6\": 100, \"rate 7\":", "self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) # Verify", "{\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": {", "6.943250, 5.057443, 7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "40, \"2010\": 40}, \"efficient\": {\"2009\": 40, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20,", "update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1])", "\"efficient\": 
{\"2009\": 60, \"2010\": 60}}, \"competed\": { \"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\":", "c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector", "41.65950}, \"efficient\": {\"2009\": 27.77300, \"2010\": 27.77300}}, \"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975},", "routine on sample demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) # Remove any market", "Run measure competition routine on sample measures self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) # Run", "demand-side cooling measure 1 including lists of energy/carbon and associated cost input values", "measures including some measures with array inputs. measures_secondary_dist (list): Subset of 'measures_all_dist' with", "\"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist = { \"name\": \"sample compete measure", "{ cls.overlap_key: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "self.ok_out_dist3[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self): \"\"\"Test", "\"2010\": None}}}] # Adjust/finalize point value test measure consumer metrics for ind, m", "\"total\": { \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}, \"efficient\": { \"2009\":", "\"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\": None,", "self.sample_measure4 = { \"name\": \"sample measure 4\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None,", "5) }}}, \"irr (w/ energy costs)\": { \"2009\": numpy.array([ 3.648926, 3.737086, 3.956335, 
3.180956,", "}, \"mseg_out_break\": {}}}} cls.compete_meas1_dist = { \"name\": \"sample compete measure r1 dist\", \"climate_zone\":", "\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\":", "True, \"competed\": True}, \"Max adoption potential\": { \"uncompeted\": False, \"competed\": True}}, \"consumer metrics\":", "os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset aeo_years cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.sample_measure_res", "self.ok_master_mseg_point # Create Engine instance using test measure, run function on it engine_instance", "0.71, 6.5, 0, 999] def test_cashflow_paybacks(self): \"\"\"Test for correct outputs given valid inputs.\"\"\"", "array inputs. measures_demand_dist (list): Demand-side subset of 'measures_all_dist'. measures_supply_dist (list): Supply-side subset of", "{ \"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": { \"2009\": 1.29884336, \"2010\": 1.29884336},", "-8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}}, { \"anpv\": {", "\"2010\": numpy.array([16, 15, 13])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {", "function output given valid input.\"\"\" # Instantiate measure measure_instance = run.Measure(self.handyvars, **self.sample_measure) #", "of # heating and cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed", "cls.ok_esave = 7.5 cls.ok_ecostsave = 0.5 cls.ok_csave = 50 cls.ok_ccostsave = 1 cls.ok_out_array", "-205, \"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200}, \"2010\": { \"rate", "numpy.array([18, 19.5, 24])}}, \"competed\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])},", "24}, \"efficient\": {\"2009\": 25.5, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 17, 
\"2010\": 12},", "{\"2009\": .30, \"2010\": .30}, \"Cooling\": {\"2009\": .35, \"2010\": .35}}, \"Commercial\": { \"Heating\": {\"2009\":", "\"residential\": { \"2009\": 100, \"2010\": 100}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy", "numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}, \"efficient\": { \"2009\": numpy.array([", "\"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": { \"baseline\":", "objects. measures_all_dist (list): List including competing/interacting sample Measure objects with array inputs. measures_demand_dist", "0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "\"2009\": numpy.array([ 0.1937984, 0.1879699, 0.1748252, 0.2840909, 0.1724138]), \"2010\": numpy.array([ 0.2008032, 0.1901141, 0.2145923, 0.2100840,", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 16.04455, 17.29736, 10.29000]), \"2010\": numpy.array([ 16.04455, 17.29736,", "are not of identical size, # zip_longest() will use the fill value created", "test measure consumer # metrics consumer_metrics_dist = [{ \"stock cost\": { \"residential\": {", "Run the measure competition routine on sample supply-side measures self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2, self.test_adopt_scheme)", "dict2): \"\"\"Check the equality of two dicts. 
Args: dict1 (dict): First dictionary to", "\"Max adoption potential\" cls.adjust_key1 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',", "\"2009\": numpy.array([8.89, 5.11, 9.99]), \"2010\": numpy.array([8.89, 5.11, 9.99])}}}, \"energy\": { \"total\": { \"baseline\":", "7.20249116])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}},", "outcomes given sample measures w/ point value inputs.\"\"\" # Run measure competition routine", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 50, \"rate 2\": 60, \"rate", "{\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([2.23, 9.77, 0.02]), \"2010\": numpy.array([2.23, 9.77,", "= engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, self.ok_csave, self.ok_ccostsave) # Test", "functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset aeo_years cls.handyvars.aeo_years =", "consumer adoption scheme. test_htcl_adj (dict): Sample dict with supply-demand overlap data. 
adjust_key1 (string):", "\"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-50, -100, -10])}, \"commercial\": { \"2009\":", "42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775, \"2010\": 31.66775}}, \"competed\": { \"baseline\": {\"2009\": 21.11183,", "30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]),", "class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset aeo_years cls.handyvars.aeo_years", "\"2009\": numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08,", "{\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "\"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "\"rate 4\": 80, \"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110}, \"2010\":", "numpy.pmt(10.0, 2, -0.4318182), \"rate 2\": numpy.pmt(1.0, 2, -0.125), \"rate 3\": numpy.pmt(0.45, 2, 0.01724138),", "0.865895571, 0.01085301, 6.722325])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\":", "\"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\":", "baseline stock cost. ok_scostsave (int): Sample baseline->measure stock cost delta. 
ok_esave (int): Sample", "10}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\":", "10, \"2010\": 16}, \"efficient\": {\"2009\": 20, \"2010\": 8}}, \"competed\": { \"baseline\": {\"2009\": 5,", "{ \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}, \"competed\": {", "20, \"2010\": 20}, \"measure\": {\"2009\": 16.04, \"2010\": 16.04}}, \"competed\": { \"all\": {\"2009\": 10,", "0.1350000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])}, \"payback (w/", "{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "3])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test output given residential measure with array", "\"rate 6\": 10, \"rate 7\": 135}])}}, \"energy cost\": { \"residential\": { \"2009\": None,", "1\": -190, \"rate 2\": -195, \"rate 3\": -190, \"rate 4\": -205, \"rate 5\":", "= { \"market_entry_year\": None, \"market_exit_year\": None, \"markets\": { \"Technical potential\": { \"key 1\":", "for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for", "{\"2009\": 8.5, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17,", "to the normal output from zip_longest() fill_val = ('substituted entry', 5.2) # In", "0.9103132), \"rate 7\": -0.5}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\":", "\"2009\": numpy.array([ -3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07,", "{ \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}, 
\"efficient\": { \"2009\": 0, \"2010\": numpy.array([6,", "mseg adjustments\": { \"market share\": { \"original energy (total captured)\": {}, \"original energy", "-7.592937e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2,", "numpy.repeat(None, 5) }}}, \"irr (w/ energy costs)\": { \"2009\": numpy.array([ 3.648926, 3.737086, 3.956335,", "10}, \"measure\": {\"2009\": 0, \"2010\": 8}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0,", "\"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.04958678), \"rate 2\": numpy.pmt(1.0, 2, 0.375), \"rate", "\"2010\": numpy.array([18, 15, 9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"cost\":", "{\"2009\": -5, \"2010\": -10}, \"cost savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": {", "10.5])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": {", "sample measures for tests. Attributes: sample_measure (dict): Sample residential measure #1. sample_measure2 (dict):", "compete measure c2 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\":", "\"efficient\": {\"2009\": 8.5, \"2010\": 6}}, \"competed\": { \"baseline\": {\"2009\": 8.5, \"2010\": 6}, \"efficient\":", "\"2010\": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])}, \"payback (w/ energy and carbon costs)\": {\"2009\":", "2, 0.8859289), numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07, 2,", "numpy arrays. Attributes: handyvars (object): Useful variables across the class. 
sample_measure (object): Sample", "-0.04898876, -0.05783823, -0.05267604, -0.05230731, -0.04751385]), \"2010\": numpy.array([ -0.09966428, -0.10353592, -0.09523954, -0.10215319, -0.09855809])}, \"ccc\":", "numpy.array([ 21.11183, 21.34227, 20.05334])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([", "and captured)\": {}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\":", "5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "\"2009\": 0, \"2010\": numpy.array([18, 15, 9])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\":", "\"2010\": 30}, \"efficient\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\":", "\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\":", "11.0, 10.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44, 44,", "measure data with lists to convert. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for", "given valid sample inputs. 
ok_out_array (list): Other financial metric values that should be", "test measure consumer # metrics consumer_metrics_final_dist = [{ \"stock cost\": { \"residential\": {", "Verify that 'compete_com_primary' correctly calculates primary market shares and updates master microsegments for", "financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics,", "Engine instance using test measure, run function on it engine_instance = run.Engine(self.handyvars, [test_meas])", "financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics,", "0.1, 0.1, 0.1, 0.4]}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy", "\"baseline\": {\"2009\": 63.33550, \"2010\": 63.33550}, \"efficient\": {\"2009\": 42.22366, \"2010\": 42.22366}}, \"competed\": { \"baseline\":", "{ \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"carbon\": {", "{\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"windows\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009,", "\"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": {", "\"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([", "CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows = [[-10, 1, 1, 1, 1, 5,", "= { \"name\": \"sample compete measure r2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"],", "each 
sample measure # following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run.measures):", "associated contributing microsegment keys that overlap with 'measures_demand' Measure objects. measures_overlap2 (dict): List", "1]] cls.ok_out = [5.14, 0.71, 6.5, 0, 999] def test_cashflow_paybacks(self): \"\"\"Test for correct", "None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys", "and energy/carbon cost arrays. ok_master_mseg_dist2 (dict): Sample measure master microsegment including stock cost", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\":", "0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]),", "sample demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across", "valid input.\"\"\" # Instantiate measure measure_instance = run.Measure(self.handyvars, **self.sample_measure) # Test for correct", "numpy.array([ -0.02466428, -0.02853592, -0.02023954, -0.02715319, -0.02355809])}, \"cce (w/ carbon cost benefits)\": { \"2009\":", "home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set", "21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\":", "5}, \"measure\": { \"2009\": numpy.array([0.87, 0.01, 4.80]), \"2010\": numpy.array([0.87, 0.01, 4.80])}}}, \"energy\": {", "3\": -55, \"rate 4\": -60, \"rate 5\": -65, \"rate 6\": -70, \"rate 7\":", "numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}}, \"competed\": { \"baseline\": {", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\": 34.5,", "of sample 
cash flows. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across", "{\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 100, \"2010\": 100}, \"cost savings (total)\":", "{ \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 60, \"2010\": 40}}, \"competed\": {", "\"rate 6\": -160, \"rate 7\": -370}, \"2010\": { \"rate 1\": -435, \"rate 2\":", "2, 1.654337), numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07, 2, 1.582016)]) }, \"commercial\": { \"2009\": numpy.repeat(None,", "AC\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing", "{\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.113501, \"2010\": 1.113501}}, \"competed\": { \"baseline\": {\"2009\":", "{ \"total\": { \"baseline\": {\"2009\": 17.77300, \"2010\": 17.77300}, \"efficient\": {\"2009\": 8.886499, \"2010\": 8.886499}},", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.89, 5.11, 9.99]), \"2010\": numpy.array([8.89,", "function output given valid inputs.\"\"\" dict1 = self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2 = self.ok_out", "0.375), \"rate 3\": numpy.pmt(0.45, 2, 0.5826397), \"rate 4\": numpy.pmt(0.25, 2, 0.72), \"rate 5\":", "consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test output given residential measure with", "5\": -155, \"rate 6\": -160, \"rate 7\": -370}}}, \"carbon cost\": { \"residential\": {", "master microsegment inputs yield expected savings and financial metrics outputs. 
Attributes: handyvars (object):", "0.22}}] cls.ok_out_dist1 = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\":", "\"2010\": 16.04455}, \"efficient\": {\"2009\": 8.022273, \"2010\": 8.022273}}, \"competed\": { \"baseline\": {\"2009\": 8.022273, \"2010\":", "savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"],", "-7.964446e-08, -8.216772e-08, -7.592937e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([", "5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": {", "\"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\":", "numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([", "{ \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}, \"efficient\": { \"2009\": 34.5,", "reached the terminal/leaf node if isinstance(i, dict): # Test that the dicts from", "# the dicts or unitary values that are found in i and i2,", "\"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) # Verify", "\"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([10.9, 11.3,", "Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test output given", "engine object incorporating all 'measures_all' objects. measures_all_dist (list): List including competing/interacting sample Measure", "Sample measure lifetime. 
ok_life_ratio (int): Sample measure->baseline lifetime ratio. ok_base_scost (int): Sample baseline", "\"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}, \"competed\": { \"baseline\": {\"2009\": 5,", "measures with array inputs. measures_secondary_dist (list): Subset of 'measures_all_dist' with secondary microsegments to", "\"demand\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": { yr: 10 for", "0.02890102])}, \"efficient\": { \"2009\": numpy.array([ 2.227001, 10.25874, 0.02119408]), \"2010\": numpy.array([ 2.227001, 10.25874, 0.02119408])}},", "# following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][", "a # substitute in the dict that has missing content; this # value", "\"2010\": 60}}, \"competed\": { \"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\":", "\"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {},", "carbon costs)\": {\"2009\": numpy.array([ 1.941176, 4.555556, 5.647891, 5.501689, 4.543007]), \"2010\": numpy.array([ 4.882353, 7.108108,", "\"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 40, \"2010\": 40}}, \"competed\": { \"baseline\":", "\"payback (w/ energy costs)\": { \"2009\": numpy.array([ 0.2392344, 0.2347418, 0.2242152, 0.2659574, 0.2857143]), \"2010\":", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 22.22366, \"2010\": 22.22366}, \"efficient\": {\"2009\":", "{ \"2009\": numpy.array( [5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": {", "numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}}, \"competed\": { \"baseline\": {", "0, 0.001808835, 1.920664])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 
0.01808835,", "5\": -155, \"rate 6\": -160, \"rate 7\": -170}, \"2010\": { \"rate 1\": -135,", "None }, \"commercial\": { \"2009\": None, \"2010\": numpy.array([ { \"rate 1\": 85, \"rate", "\"2010\": numpy.array([ 0.05350000, 0.05350000, -0.01111353, -0.01111353, -0.04976366])}, \"cce (w/ carbon cost benefits)\": {", "\"2010\": numpy.array([24, 26, 32])}, \"efficient\": { \"2009\": 25.5, \"2010\": numpy.array([18, 19.5, 24])}}, \"competed\":", "-100]), \"2010\": numpy.array([-150, -200, -100])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\":", "\"efficient\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}}, \"competed\": { \"baseline\": { \"2009\":", "{ \"2009\": numpy.array([184, 173, 169, 194, 149]), \"2010\": numpy.array([194, 205, 219, 289, 176])},", "'htcl_adj' properly accounts for heating and cooling supply-demand overlaps. Attributes: handyvars (object): Useful", "numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24,", "{ \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"carbon\": {", "cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))],", "\"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\":", "\"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 1.11, \"2010\": 1.11}}}, \"energy\": { \"total\":", "x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist)", "\"commercial\": { \"2009\": None, \"2010\": numpy.array([ { \"rate 1\": 85, \"rate 2\": 90,", "30}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 15, \"2010\": 15}}},", "1.356014), 
numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr", "-0.10215319, -0.09855809])}, \"ccc\": { \"2009\": numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([", "Args: dict1 (dict): First dictionary to be compared dict2 (dict): Second dictionary to", "emissions. ok_ccostsave (int): Sample measure avoided carbon costs. ok_out_dicts (list): Output annuity equivalent", "\"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 51, \"2010\": 36}, \"efficient\": {\"2009\":", "\"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.9345794),", "-2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([", "competed units. ok_base_life (int): Sample baseline technology lifetime. ok_product_lifetime (float): Sample measure lifetime.", "functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list =", "\"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\":", "dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'calc_savings_metrics' function. Verify that", "\"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": numpy.array([95, 100, 90]), \"2010\":", "structure type). compete_meas1 (dict): Sample commercial supply-side lighting measure 1. compete_meas2 (dict): Sample", "{ \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"carbon\": {", "10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 0}}},", "CommonTestMeasures(object): \"\"\"Class of common sample measures for tests. 
Attributes: sample_measure (dict): Sample residential", "numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}] cls.ok_out_dist4 = [{ \"savings and portfolio metrics\": {", "def test_metrics_ok_distrib3(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize test", "y in zip([ tested_data[\"key 1\"][\"nested key 1\"], tested_data[\"key 1\"][\"nested key 2\"], tested_data[\"key 2\"]],", "\"baseline\": { \"2009\": 51, \"2010\": numpy.array([36, 39, 48])}, \"efficient\": { \"2009\": 34, \"2010\":", "{ \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array(", "'single family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\": { \"total\": {", "5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"cost\": { \"stock\": { \"total\": {", "master microsegment including all point values at terminal leaf nodes. ok_master_mseg_dist1 (dict): Sample", "{ \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\":", "{}}}} cls.compete_meas2 = { \"name\": \"sample compete measure r2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single", "6.5, 8.0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24, 26,", "6\": -160, \"rate 7\": -370}, \"2010\": { \"rate 1\": -435, \"rate 2\": -440,", "\"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([1.00, 1.00,", "share\": { \"original energy (total captured)\": {}, \"original energy (competed and captured)\": {},", "\"name\": \"sample measure 3 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None,", "5.057443, 7.495183])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 41.65950, 30.34466, 44.97110]),", "0.3533569, 0.3472222, 0.3636364])}, \"payback (w/ energy and carbon 
costs)\": { \"2009\": numpy.array([ 0.1937984,", "run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life = 3 cls.ok_product_lifetime", "44}}, \"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}},", "\"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\":", "-1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08, 1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc (w/ energy", "\"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 69, \"2010\":", "\"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([", "costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_point_com = [{ \"savings and portfolio metrics\":", "os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars, **sample_measure)] cls.a_run", "= os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars, **sample_measure)]", "\"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2,", "numpy.array([ numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07, 2, 1.591056),", "supply-side cooling measure 2. compete_meas5 (dict): Sample residential supply-side cooling measure 3. 
measures_all", "17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\":", "across the supply and demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist,", "1}}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\": { \"2009\": [ 0.1,", "\"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\":", "metrics\": { \"Technical potential\": { \"uncompeted\": True, \"competed\": True}, \"Max adoption potential\": {", "8.648681, 5.144998])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([", "None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -150, \"2010\": -50}, \"commercial\":", "\"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([", "\"measure\": {\"2009\": 0.87, \"2010\": 0.87}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 1.73179114, \"2010\":", "{ \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"carbon\": {", "results data. ok_partitions (dict): Sample results partitioning fraction. 
ok_out (dict): Sample partitioned measure", "\"2010\": 15}}, \"carbon\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\":", "1\": -435, \"rate 2\": -440, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\":", "8.886499, 5.114887, 9.990366])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}},", "terminal/leaf node, formatted as a numpy array # (for input uncertainty test cases)", "4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0, 0.001808835, 1.920664]), \"2010\": numpy.array([ 0, 0.001808835, 1.920664])}}},", "\"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110}}}, \"energy cost\": { \"residential\":", "Offer external code execution (include all lines below this point in all #", "and cooling supply-demand overlaps. Attributes: handyvars (object): Useful variables across the class. test_adopt_scheme", "Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_point'), the", "\"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\":", "heating and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine", "numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346),", "2, 1.139051), numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07, 2, 2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.798978),", "numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 0.865895571, 0.01085301, 6.722325]), \"2010\": numpy.array([", "3\": numpy.pmt(0.45, 2, 0.1896552), \"rate 4\": numpy.pmt(0.25, 2, 0.3), \"rate 5\": numpy.pmt(0.15, 2,", "999] def test_cashflow_paybacks(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" # Create an", "15, \"2010\": 15}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 90, 
\"2010\": 90}, \"efficient\":", "and demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) #", "numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20,", "\"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) # Verify", "10.05334])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": {", "code execution (include all lines below this point in all # test files)", "10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5,", "measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify test measure consumer-level metrics", "3\": 120, \"rate 4\": 130, \"rate 5\": 140, \"rate 6\": 150, \"rate 7\":", "22, 21])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": 11.5,", "\"sample measure 1\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None,", "\"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}, \"efficient\": { \"2009\":", "\"efficient\": {\"2009\": 1.670251, \"2010\": 1.670251}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\":", "\"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\":", "\"rate 4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}, \"2010\":", "stock cost input values instead of point values. 
compete_meas4 (dict): Sample residential supply-side", "{\"2009\": 6.943250, \"2010\": 6.943250}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 41.65950, \"2010\": 41.65950},", "enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary' and 'secondary_adj' functions.", "{ \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -100, \"2010\":", "\"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector (LED)')):", "'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply',", "0, 1]] cls.ok_out = [5.14, 0.71, 6.5, 0, 999] def test_cashflow_paybacks(self): \"\"\"Test for", "{ \"cost savings (total)\": {\"2009\": -5, \"2010\": -10}, \"cost savings (annual)\": {\"2009\": -5,", "use in all tests below.\"\"\" def dict_check(self, dict1, dict2): \"\"\"Check the equality of", "numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([", "33, 31.5])}}, \"competed\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\":", "7\": 110}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", "1. 
compete_meas1_dist (dict): Alternative version of sample residential demand-side cooling measure 1 including", "8.5, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 51, \"2010\": 36}, \"efficient\":", "= os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)]", "\"2010\": 30}, \"efficient\": { \"2009\": numpy.array( [20, 21, 22]), \"2010\": numpy.array( [20, 21,", "0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "\"mseg_adjust\": { \"contributing mseg keys and values\": { cls.overlap_key: { \"stock\": { \"total\":", "{ \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array(", "captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(cls.handyvars,", "10.02667])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 63.33550, 64.02682, 60.16002]), \"2010\":", "None}, \"commercial\": { \"2009\": { \"rate 1\": -90, \"rate 2\": -95, \"rate 3\":", "{\"2009\": 22.22, \"2010\": 22.22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\":", "120, \"rate 7\": 125}, { \"rate 1\": 105, \"rate 2\": 110, \"rate 3\":", "0.2009346), numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy", "{ \"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply',", "17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10},", "1. compete_meas2 (dict): Sample commercial supply-side lighting measure 2. 
compete_meas3 (dict): Sample commercial", "{\"2009\": 0, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "\"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": {", "stock cost array. ok_master_mseg_dist3 (dict): Sample measure master microsegment including measure lifetime array.", "energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(cls.handyvars, **x) for x", "4.6])}} cls.ok_master_mseg_dist4 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20},", "0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([", "{\"2009\": 5, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300},", "\"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}, { \"rate 1\":", "30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array( [20, 21,", "-0.10, \"2010\": -0.10}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total", "30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\":", "def test_numpy_convert(self): \"\"\"Test for correct function output given valid input.\"\"\" # Instantiate measure", "{ \"uncompeted\": False, \"competed\": True}}, \"consumer metrics\": False}, { \"stock\": { \"cost savings", "-145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370},", "{ \"baseline\": {\"2009\": 13.02227, \"2010\": 13.02227}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"carbon\": {", "{\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\":", "(grid)', 'cooling', 'supply', 'ASHP', 
'existing')) cls.test_htcl_adj = { \"supply\": { \"['AIA_CZ1', 'single family", "numpy.array( [15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }},", "\"rate 6\": -70, \"rate 7\": -75}, \"2010\": { \"rate 1\": -40, \"rate 2\":", "42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}, \"efficient\": { \"2009\": numpy.array([ 31.66775,", "outcomes given valid sample measures w/ some array inputs.\"\"\" # Run measure competition", "0.5567503, \"2010\": 0.5567503}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 3.340502, \"2010\": 3.340502}, \"efficient\":", "(grid)', 'cooling', 'demand', 'windows', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling',", "{\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": {", "{ \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\":", "class CommonTestMeasures(object): \"\"\"Class of common sample measures for tests. 
Attributes: sample_measure (dict): Sample", "energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\":", "0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": {", "{ \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array( [0, 1,", "numpy.array( [15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}},", "{ cls.adjust_key1: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}},", "\"baseline\": {\"2009\": 0, \"2010\": 36}, \"efficient\": {\"2009\": 0, \"2010\": 24}}, \"competed\": { \"baseline\":", "15}, \"measure\": { \"2009\": numpy.array([11.11, 11.34, 10.05]), \"2010\": numpy.array([11.11, 11.34, 10.05])}}}, \"energy\": {", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0,", "9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": {", "\"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "commercial measures; and that 'secondary_adj' correctly adjusts any secondary markets associated with these", "1.29884336}}, \"competed\": { \"baseline\": { \"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": { \"2009\": 0.432947785,", "self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) #", "metrics that should be generated given 'ok_master_mseg_point' with a residential sample measure. 
ok_out_point_com", "None}, \"commercial\": { \"2009\": { \"rate 1\": -190, \"rate 2\": -195, \"rate 3\":", "numpy.array([ 39.06682, 40.94604, 30.43499])}, \"efficient\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([", "(dict): Second dictionary to be compared Raises: AssertionError: If dictionaries are not equal.", "\"2010\": numpy.array([36, 30, 18])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}}, \"competed\":", "-9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07, -9.855809e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": {", "{\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17, \"2010\": 12}}, \"competed\": { \"all\": {\"2009\":", "{\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\":", "\"2010\": 20}, \"measure\": { \"2009\": numpy.array([16.04, 17.30, 10.29]), \"2010\": numpy.array([16.04, 17.30, 10.29])}}, \"competed\":", "self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments for each sample measure #", "self.assertAlmostEqual(i, i2, places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure attributes are correctly initiated. 
Attributes:", "\"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 30,", "\"rate 7\": -170}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "8.5, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17, \"2010\":", "1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "15}, \"cost savings (annual)\": {\"2009\": 10, \"2010\": 15}}, \"carbon\": { \"savings (total)\": {\"2009\":", "\"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "\"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\":", "captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(", "2\": numpy.pmt(1.0, 2, 0), \"rate 3\": numpy.pmt(0.45, 2, 0.1896552), \"rate 4\": numpy.pmt(0.25, 2,", "10.14500]), \"2010\": numpy.array([ 13.02227, 13.64868, 10.14500])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]),", "0].consumer_metrics, self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self): \"\"\"Test output given residential measure with array inputs.\"\"\" #", "{ \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.113501, \"2010\": 1.113501}}, \"competed\": {", "0].update_results, self.ok_out_dist2[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify test", "\"2010\": numpy.array([ 0.1133333, 0.08222222, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3 = [{ \"savings and portfolio", "{\"2009\": 50, 
\"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\":", "{ \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0,", "ok_ecostsave (int): Sample measure energy cost savings. ok_csave (int): Sample measure avoided carbon", "numpy.array([ 1.29884336, 0.01356626, 7.20249116])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([", "item is the value; # in the case where the dicts are not", "test files) def main(): \"\"\"Trigger default behavior of running all test fixtures in", "\"sample compete measure r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"],", "Measure object in 'measures_all' following competition and supply-demand overlap adjustments. measure_master_msegs_out_dist (dict): Master", "-1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc (w/ energy cost benefits)\":", "15.21750])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}},", "import copy import itertools import os class CommonTestMeasures(object): \"\"\"Class of common sample measures", "{ \"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5},", "= run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize point value test measure", "lifetime array. 
ok_out_point_res (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 16}}, \"competed\": { \"all\":", "0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "-0.10353592, -0.09523954, -0.10215319, -0.09855809])}, \"ccc\": { \"2009\": numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]),", "5) }}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07,", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07, 2, 2.223862),", "1 cls.ok_meas_sdelt = -1 cls.ok_esave = 7.5 cls.ok_ecostsave = 0.5 cls.ok_csave = 50", "numpy.array( [15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": {", "= { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": .10, \"2010\": .10}, \"Cooling\":", "primary and secondary market microsegments (by climate, building type, structure type). 
compete_meas1 (dict):", "= run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize array test measure consumer", "numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 5, 2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1,", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.02, \"2010\": 8.02}}}, \"energy\":", "0.25, \"2010\": 0.33}, \"payback (w/ energy and carbon costs)\": { \"2009\": 0.2, \"2010\":", "\"2010\": numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc (w/ energy cost benefits)\": {", "2.44, 2.99])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([2.00, 2.00, 4.54, 4.54,", "and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure3 = { \"name\": \"sample measure 3", "5}, \"measure\": {\"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20,", "of the 'metric_update' # function function_output = engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt,", "measure with point value inputs.\"\"\" # Initialize test measure and assign it a", "-170}, \"2010\": { \"rate 1\": -135, \"rate 2\": -140, \"rate 3\": -145, \"rate", "numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\": {\"baseline\":", "savings\": { yr: 5 for yr in cls.handyvars.aeo_years}}, }, \"demand\": { \"['AIA_CZ1', 'single", "cls.overlap_key = str( ('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'lighting', 'reflector (LED)', 'existing')) cls.overlap_key_scnd", "captured)\": {}, \"original energy (competed and captured)\": {}, \"adjusted energy (total captured)\": {},", "0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}, cls.overlap_key_scnd: { \"rate", "{}}}} cls.compete_meas2_dist = { \"name\": \"sample compete measure c2 dist\", 
\"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "{ \"2009\": numpy.array( [20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": {", "= [[-10, 1, 1, 1, 1, 5, 7, 8], [-10, 14, 2, 3,", "\"Max adoption potential\": { \"key 1\": { \"nested key 1\": [0.5, 0.2, 0.3,", "(commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full", "\"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"carbon\": { \"total\":", "numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}}, \"competed\": { \"baseline\": {", "-170}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", "numpy.array([17.77, 10.23, 19.98])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\":", "\"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key1: { \"stock\":", "{ \"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}},", "\"\"\"Check the equality of two dicts. Args: dict1 (dict): First dictionary to be", "numpy.array([ 8.022273, 8.648681, 5.144998])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]),", "\"2010\": numpy.array([ { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95, \"rate", "{\"2009\": 69, \"2010\": 66}, \"efficient\": {\"2009\": 46, \"2010\": 44}}, \"competed\": { \"baseline\": {\"2009\":", "sample measure # following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check(", "cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-150, -200, -100])}, \"commercial\":", "'out_break_walk' function. 
Verify that function properly applies a climate zone/building type/end use partition", "expected prioritization metric outputs. Attributes: handyvars (object): Useful variables across the class. measure_list", "0, \"2010\": numpy.array([18, 15, 9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}},", "\"rate 5\": 115, \"rate 6\": 120, \"rate 7\": 125}, { \"rate 1\": 105,", "\"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 100, \"2010\": 100}, \"commercial\":", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17.77, \"2010\": 17.77}}, \"competed\":", "\"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\": { \"2009\": None,", "\"2009\": numpy.array([ 3.648926, 3.737086, 3.956335, 3.180956, 2.886001]), \"2010\": numpy.array([ 2.425032, 2.584709, 2.240438, 2.298386,", "measure lifetime array. ok_out_point_res (dict): Measure attribute update status, savings, and portfolio/consumer-level financial", "{ \"residential\": { \"2009\": -100, \"2010\": -100}, \"commercial\": { \"2009\": None, \"2010\": None}}}]", "64.02682, 60.16002])}, \"efficient\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455,", "of conserved energy/carbon outputs for ind, x in enumerate(self.ok_out_array): if x is not", "\"competed\": { \"baseline\": { \"2009\": 34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\":", "None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 100, \"2010\": 100},", "# Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist3'),", "\"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\": {\"2009\": 15, \"2010\":", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": 
{}}, \"Max adoption", "focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1", "4, 7, 8, 10], [-100, 0, 1]] cls.ok_out = [5.14, 0.71, 6.5, 0,", "15, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": {", "\"competed\": { \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"cost\":", "market shares and updates master microsegments for a series of competing commercial measures;", "\"sub-market scaling\": 1}, cls.overlap_key_scnd: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.670251, 7.816181, 0.01637724]), \"2010\": numpy.array([ 1.670251, 7.816181,", "to finalize array test measure consumer # metrics consumer_metrics = [{ \"stock cost\":", "{ \"total\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}},", "5\": numpy.pmt(0.15, 2, 0.1521739), \"rate 6\": numpy.pmt(0.065, 2, 0.2042254), \"rate 7\": -0.125}}}, \"energy", "outcomes given valid sample measures w/ point value inputs.\"\"\" # Run the measure", "and i2, # respectively, at the current level of the recursive # exploration", "15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10,", "mseg adjustments\": { \"market share\": { \"original energy (total captured)\": { cls.secnd_adj_key: {\"2009\":", "\"Heating\": {\"2009\": 30, \"2010\": 30}, \"Cooling\": {\"2009\": 35, \"2010\": 35}}, \"Commercial\": { \"Heating\":", "-0.13, 7.7e-10, -9.2e-9] def test_metric_updates(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" #", "\"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\":", "adjustment\": { \"savings\": { cls.adjust_key1: { \"2009\": 0, 
\"2010\": 0}}, \"total\": { cls.adjust_key1:", "outputs given valid inputs.\"\"\" # Create an Engine instance using sample_measure list engine_instance", "\"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "19.2, 20.5]) }}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\":", "\"2009\": numpy.array([ 3.340502, 14.65534, 0.02890102]), \"2010\": numpy.array([ 3.340502, 14.65534, 0.02890102])}, \"efficient\": { \"2009\":", "\"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35},", "{ \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}}, \"competed\": {", "metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results,", "measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[", "-10}, \"cost savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": {", "0, \"2010\": 0}}, \"adjusted energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}},", "\"efficient\": { \"2009\": numpy.array( [15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7,", "\"key 2\": 5.8}}} def test_numpy_convert(self): \"\"\"Test for correct function output given valid input.\"\"\"", "measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) # Verify test measure savings 
self.dict_check(engine_instance.measures[0].savings[", "{ \"name\": \"sample compete measure c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\":", "8])}}, \"competed\": { \"baseline\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}, \"efficient\": {", "\"2009\": numpy.pmt(0.07, 2, 0.9040091), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}},", "\"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}, \"competed\":", "{ \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array( [15, 16, 17]), \"2010\":", "{ \"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 100,", "= { \"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling',", "None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 120, \"2010\": 120},", "Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[", "{ \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\":", "{ \"2009\": 11.5, \"2010\": numpy.array([11, 11, 10.5])}}, \"competed\": { \"baseline\": { \"2009\": 11.5,", "5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1,", "0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"cost\": { \"stock\": { \"total\":", "# zip() and zip_longest() produce tuples for the items # identified, where in", "\"baseline\": { \"2009\": 1.73179114, 
\"2010\": 1.73179114}, \"efficient\": { \"2009\": 0.865895571, \"2010\": 0.865895571}}, \"competed\":", "\"competed\"]: tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x, y) for x, y in zip([", "\"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775, \"2010\": 31.66775}}, \"competed\": { \"baseline\": {\"2009\": 21.11183, \"2010\":", "200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array(", "and # cost of conserved energy/carbon outputs for ind, x in enumerate(self.ok_out_array): if", "sample_measure = CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars, **sample_measure)] cls.a_run = run.Engine(handyvars, measure_list) cls.ok_total =", "\"rate 2\": 60, \"rate 3\": 70, \"rate 4\": 80, \"rate 5\": 90, \"rate", "\"efficient\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\":", "and k2 are the keys that correspond to # the dicts or unitary", "{ \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist3 = { \"stock\": {", "payback output. Attributes: handyvars (object): Useful variables across the class. measure_list (list): List", "0.1222222])}}] cls.ok_out_dist3 = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\":", "Continue to recursively traverse the dict self.dict_check(i, i2) # At the terminal/leaf node,", "[100.6, 108.7, 105.1, 105, 106.1])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "following competition and supply-demand overlap adjustments. 
measure_master_msegs_out_dist (dict): Master market microsegments that should", "\"name\": \"sample compete measure c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"],", "\"2010\": 27.77300}}, \"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\":", "numpy.array([194, 205, 219, 289, 176])}, \"savings (annual)\": { \"2009\": numpy.array([94, 93, 99, 84,", "\"efficient\": {\"2009\": 40, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\":", "energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}}", "{ \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}}, \"competed\": {", "{ \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array( [25.1, 24.7, 23.7,", "{ \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist1 = { \"stock\": {", "# Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) # Verify test", "\"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\":", "\"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -200, \"2010\": -200}, \"commercial\": {", "(dict): Alternative version of sample residential supply-side cooling measure 1 including lists of", "class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'convert_to_numpy' function. 
Verify that the", "4.76654), None, None, None, 0.62, 1.59, 2, 0.67, 0.005, -0.13, 7.7e-10, -9.2e-9] def", "ok_out_point_com (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that should", "5\": 105, \"rate 6\": 110, \"rate 7\": 115}}}, \"energy cost\": { \"residential\": {", "\"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "carbon cost benefits)\": { \"2009\": numpy.array([ -0.0396936, -0.04452961, -0.05150073, -0.006204243, -0.09331291]), \"2010\": numpy.array([", "of the 'payback' function. Verify cashflow input generates expected payback output. Attributes: handyvars", "\"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([", "10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "{\"2009\": 11.11183, \"2010\": 11.11183}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "a series of competing residential measures; and that 'htcl_adj' properly accounts for heating", "(list): Supply-side subset of 'measures_all_dist'. 
measures_overlap1_dist (dict): List of supply-side Measure objects and", "is empty, is missing section(s), or has different key names self.assertEqual(k, k2) #", "1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": {", "across the supply and demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist,", "numpy.array([44, 44, 42])}, \"efficient\": { \"2009\": 34.5, \"2010\": numpy.array([33, 33, 31.5])}}, \"competed\": {", "{\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\":", "{}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}, \"supply-demand", "1}, \"measure\": 1}}] def test_compete_res(self): \"\"\"Test outcomes given valid sample measures w/ point", "10}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\":", "str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\": {", "= 1 cls.ok_meas_sdelt = -1 cls.ok_esave = 7.5 cls.ok_ecostsave = 0.5 cls.ok_csave =", "cls.handyvars.aeo_years}, \"total affected\": { yr: 5 for yr in cls.handyvars.aeo_years}, \"affected savings\": {", "(int): Sample measure energy cost savings. 
ok_csave (int): Sample measure avoided carbon emissions.", "= { \"name\": \"sample measure 2\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\":", "\"total\": { \"baseline\": { \"2009\": 10, \"2010\": numpy.array([16, 15, 13])}, \"efficient\": { \"2009\":", "\"rate 7\": 125}, { \"rate 1\": 105, \"rate 2\": 110, \"rate 3\": 115,", "\"competed\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"carbon\":", "\"GSHP\", \"room AC\"], \"secondary\": [\"general service (LED)\"]}, \"markets\": { \"Technical potential\": { \"master_mseg\":", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} class", "1.130011)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"carbon cost\":", "cost benefits)\": { \"2009\": numpy.array([ -8.232209e-08, -9.117156e-08, -8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08,", "0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0, 0.001808835,", "default behavior of running all test fixtures in the file.\"\"\" unittest.main() if __name__", "(string): First sample string for competed demand-side and supply-side market microsegment key chain", "\"2009\": numpy.array([ -8.232209e-08, -9.117156e-08, -8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([ -9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07,", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10,", "{ \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\":", "45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "{ \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 30, \"2010\": 20}}, \"competed\": {", 
"18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {", "'windows', 'existing')) cls.adjust_key2 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling',", "market microsegment key chain being tested. overlap_key_scnd (string): Second sample string for secondary", "\"efficient\": { \"2009\": numpy.array([16, 27, 31, 6, 51]), \"2010\": numpy.array([106, 95, 81, 11,", "7.108108, 6.327488, 10.343948, 8.181351])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.51, 0.2700000, 0.2050000,", "30, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 15,", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.02, 8.65, 5.14]), \"2010\": numpy.array([8.02,", "\"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}, \"supply-demand adjustment\":", "cls.ok_out_dist2 = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True,", "\"2010\": 200}, \"savings (annual)\": {\"2009\": 100, \"2010\": 100}, \"cost savings (total)\": {\"2009\": 10,", "1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 5,", "24}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}},", "family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\",", "\"2010\": 13.88650}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "\"name\": \"sample compete measure r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\":", "}}}, \"mseg_out_break\": {}}}} self.sample_measure4 = { \"name\": \"sample measure 4\", \"active\": 1, \"market_entry_year\":", "{} }}}, 
\"mseg_out_break\": {}}}} self.sample_measure5 = { \"name\": \"sample measure 5 (commercial)\", \"active\":", "self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'convert_to_numpy'", "\"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary mseg adjustments\":", "15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "numpy.array([11.11, 11.34, 10.05]), \"2010\": numpy.array([11.11, 11.34, 10.05])}}}, \"energy\": { \"total\": { \"baseline\": {", "market ('ok_master_mseg_dist2'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][", "0}}, \"adjusted energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}},", "\"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([", "master microsegments for a series of competing residential measures; and that 'htcl_adj' properly", "'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist = { \"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family", "-195, \"rate 3\": -190, \"rate 4\": -205, \"rate 5\": -180, \"rate 6\": -230,", "\"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 50, \"2010\": 100}}}, \"cost\":", "consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{", "7]), \"2010\": numpy.array([5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\":", "\"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}, 
\"efficient\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\":", "\"GSHP\", \"room AC\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\":", "numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\": { \"total\": {", "\"rate 1\": 105, \"rate 2\": 110, \"rate 3\": 115, \"rate 4\": 120, \"rate", "{ \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-150, -200, -100])}, \"commercial\": {", "8, 10], [-100, 0, 1]] cls.ok_out = [5.14, 0.71, 6.5, 0, 999] def", "1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 5,", "energy/carbon cost arrays. ok_master_mseg_dist2 (dict): Sample measure master microsegment including stock cost array.", "cost\": { \"residential\": { \"2009\": 100, \"2010\": 100}, \"commercial\": { \"2009\": None, \"2010\":", "\"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07,", "Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist3'), the", "10, \"2010\": 10}, \"measure\": {\"2009\": 2.23, \"2010\": 2.23}}, \"competed\": { \"all\": {\"2009\": 5,", "r1 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None},", "{ cls.adjust_key1: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "{ cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}},", "{ \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 10}}, \"competed\": {", "27.77300, 20.22977, 29.98073])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\":", "5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\":", "= 
self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2 = self.ok_out self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test", "{ \"2009\": 10, \"2010\": numpy.array([0, 2, 4])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "{ \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"cost\": {", "\"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 1.29884336, \"2010\":", "attributes. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\"", "'compete_res_primary' correctly calculates primary market shares and updates master microsegments for a series", "[15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\":", "keys that overlap with 'measures_supply' Measure objects. a_run (object): Analysis engine object incorporating", "and that 'htcl_adj' properly accounts for heating and cooling supply-demand overlaps. 
Attributes: handyvars", "30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods):", "\"total\": { \"all\": { \"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": 23, \"2010\":", "5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": {\"2009\": 0,", "20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}, \"efficient\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]),", "20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "\"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}},", "{ \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 50, \"2010\": 100}}}, \"cost\": {", "{\"2009\": 0, \"2010\": 0}}, \"original energy (competed and captured)\": { cls.secnd_adj_key: {\"2009\": 0,", "Run secondary microsegment adjustments on sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) #", "51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}, \"competed\": { \"baseline\": {\"2009\": 100,", "\"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 0, \"2010\": 50}}}, \"carbon\": { \"total\":", "[\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\":", "\"baseline\": {\"2009\": 90, \"2010\": 90}, \"efficient\": {\"2009\": 60, \"2010\": 60}}, \"competed\": { \"baseline\":", "status 
self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) #", "2.59768671}, \"efficient\": {\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": {\"2009\": 1.29884336, \"2010\": 1.29884336},", "\"rate 4\": -105, \"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120}}}}] #", "2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346)]) }, \"commercial\": { \"2009\": numpy.repeat(None,", "self.ok_out_point_com[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify", "2\": 60, \"rate 3\": 70, \"rate 4\": 80, \"rate 5\": 90, \"rate 6\":", "\"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]),", "{\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"cost\": { \"stock\": {", "valid sample measures w/ some array inputs.\"\"\" # Run measure competition routine on", "measure. ok_out_dist3 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that", "20}, \"measure\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}}, \"competed\": { \"all\": {\"2009\":", "\"2010\": .20}, \"Cooling\": {\"2009\": .25, \"2010\": .25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\":", "generated given 'ok_master_mseg_dist1' with a residential sample measure. 
ok_out_dist2 (dict): Measure attribute update", "numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None,", "5 for yr in cls.handyvars.aeo_years}}, }, \"demand\": { \"['AIA_CZ1', 'single family home', 'existing']\":", "0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "costs)\": { \"2009\": 4.54, \"2010\": 4.09}, \"payback (w/ energy costs)\": { \"2009\": 0.25,", "{}}}} class CommonMethods(object): \"\"\"Define common methods for use in all tests below.\"\"\" def", "cashflow inputs generate expected prioritization metric outputs. Attributes: handyvars (object): Useful variables across", "numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 69, \"2010\": numpy.array([66,", "numpy.array([44, 44, 42])}}, \"competed\": { \"baseline\": { \"2009\": 34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])},", "\"2009\": 5, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1},", "'electricity (grid)', 'lighting', 'reflector (LED)', 'existing')) cls.overlap_key_scnd = str( ('secondary', 'AIA_CZ1', 'assembly', 'electricity", "}, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure r3\", \"climate_zone\": [\"AIA_CZ1\"],", "{\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\":", "\"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}}, \"secondary mseg adjustments\": {", "\"carbon cost\": { \"residential\": { \"2009\": -50, \"2010\": -50}, \"commercial\": { \"2009\": None,", "numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794),", "\"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17, \"2010\":", "2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), 
numpy.pmt(0.07, 2, 0.9040091)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014),", "{ \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\": 0, \"2010\": 18}},", "0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\": { \"total\": { \"baseline\": {", "focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4", "self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on sample supply-side measures", "20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\":", "cls.measures_all_dist[2:5] cls.measures_overlap1_dist = { \"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity", "(object): Sample analysis engine object. ok_total (dict): Sample unpartitioned measure results data. 
ok_partitions", "\"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\":", "0.2840909, 0.1724138]), \"2010\": numpy.array([ 0.2008032, 0.1901141, 0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2 = [{ \"savings", "\"2010\": 0.865895571}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\":", "\"cce\": { \"2009\": numpy.array([ -0.01306317, -0.01389378, -0.01422262, -0.01238981, -0.01613170]), \"2010\": numpy.array([ -0.01145724, -0.01084246,", "6\": 10, \"rate 7\": 135}])}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\":", "-100, \"rate 4\": -105, \"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120},", "= [ numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654), None, None,", "\"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([2.23,", "numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0,", "2, 1.08), \"rate 5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate", "[\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": None}, \"markets\": { \"Technical potential\": {", "across the supply and demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_demand,", "{\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist3 = { \"stock\": { \"total\": {", "20}, \"measure\": {\"2009\": 17, \"2010\": 12}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10},", "value; # in the case where the dicts are not of identical size,", "master microsegment including stock cost array. 
ok_master_mseg_dist3 (dict): Sample measure master microsegment including", "{\"2009\": 2.23, \"2010\": 2.23}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\":", "{ \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 36}, \"efficient\": {\"2009\": 0, \"2010\": 24}},", "measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[", "numpy.pmt(0.15, 2, 0.8128544), \"rate 6\": numpy.pmt(0.065, 2, 0.9103132), \"rate 7\": -0.5}, \"2010\": {", "\"2010\": 60}, \"efficient\": {\"2009\": 40, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\":", "to finalize array test measure consumer # metrics consumer_metrics_final_dist = [{ \"stock cost\":", "\"2010\": numpy.array([8.02, 8.65, 5.14])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455,", "compete_meas2 (dict): Sample commercial supply-side lighting measure 2. compete_meas3 (dict): Sample commercial supply-side", "18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "\"efficient\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}}, \"competed\": { \"baseline\": { \"2009\":", "[\"2009\", \"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max adoption potential'", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self):", "'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars,", "\"2010\": 0.33}, \"payback (w/ energy and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}]", "measure. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\"", "14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}}, \"energy\": { \"total\": {", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07,", "10, \"2010\": 10}, \"measure\": {\"2009\": 8.5, \"2010\": 6}}}, \"energy\": { \"total\": { \"baseline\":", "benefits)\": { \"2009\": -0.04935749, \"2010\": -0.08611353}, \"ccc\": {\"2009\": -1.602415e-08, \"2010\": -1.111353e-08}, \"ccc (w/", "\"2010\": 16}, \"efficient\": {\"2009\": 20, \"2010\": 8}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\":", "0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.2040000, 0.10800000, 0.1640000, 0.16800000,", "{ \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\":", "cost\": { \"residential\": { \"2009\": -100, \"2010\": -100}, \"commercial\": { \"2009\": None, \"2010\":", "{ \"2009\": 0, \"2010\": numpy.array([16, 15, 13])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\":", "and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist3'), the focus of this", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\":", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\":", "that should be generated given 'ok_master_mseg_dist2' with a residential sample measure. 
ok_out_dist3 (dict):", "\"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0,", "\"2010\": numpy.array([ 0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20,", "\"efficient\": {\"2009\": 50, \"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\":", "enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,' and 'htcl_adj'. Verify that", "\"efficient\": {\"2009\": 26.04455, \"2010\": 26.04455}}, \"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\":", "be of comparable structure # to the normal output from zip_longest() fill_val =", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([2.23, 9.77, 0.02]), \"2010\":", "numpy.repeat(None, 5) }}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.97074),", "TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure attributes are correctly initiated. Attributes: sample_measure (object): Residential sample", "21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\":", "10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\":", "\"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure r2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "\"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]),", "\"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'convert_to_numpy' function. 
Verify that", "savings (total)\": {\"2009\": 5, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}},", "(string): First sample string for competed primary market microsegment key chain being tested.", "(competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}, \"supply-demand adjustment\": {", "'existing')) cls.test_htcl_adj = { \"supply\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\":", "\"2010\": 41.65950}, \"efficient\": {\"2009\": 27.77300, \"2010\": 27.77300}}, \"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\":", "0.2200000]), \"2010\": numpy.array([ 0.1133333, 0.08222222, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3 = [{ \"savings and", "\"name\": \"sample measure 5 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None,", "\"2010\": 30}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 15, \"2010\":", "\"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist3 = { \"stock\": { \"total\": { \"all\": {\"2009\":", "be generated given valid sample inputs. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for", "0.01637724]), \"2010\": numpy.array([ 1.670251, 7.816181, 0.01637724])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501,", "for competed demand-side and supply-side market microsegment key chain being tested. 
adjust_key2 (string):", "run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life = 3 cls.ok_product_lifetime =", "-435, \"rate 2\": -440, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\": -155,", "test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) # Verify test measure savings", "{ \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 0.865895571,", "\"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\", \"cooling\"]}, \"technology\": [\"reflector (LED)\"], \"technology_type\":", "given valid sample measures w/ some array inputs.\"\"\" # Run measure competition routine", "savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist2' with a", "self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across the supply and", "Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify test measure", "cls.ok_out = [5.14, 0.71, 6.5, 0, 999] def test_cashflow_paybacks(self): \"\"\"Test for correct outputs", "19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"cost\": { \"stock\": { \"total\":", "\"rate 2\": 90, \"rate 3\": 95, \"rate 4\": 100, \"rate 5\": 105, \"rate", "45, \"2010\": 45}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15,", "{}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure3 = {", "'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))], 
[str(('primary',", "numpy.pmt(0.065, 2, 0.9103132), \"rate 7\": -0.5}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017),", "\"technology_type\": { \"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"],", "4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\":", "class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure measure_list", "{ \"baseline\": {\"2009\": 13.88650, \"2010\": 13.88650}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"carbon\": {", "all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = {", "19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of", "function properly applies a climate zone/building type/end use partition to a total energy", "{ \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 15, \"2010\": 15}},", "cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments for each", "with secondary microsegments to adjust. a_run (object): Analysis engine object incorporating all 'measures_primary'", "cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\":", "commercial supply-side lighting measure 1. 
compete_meas2 (dict): Sample commercial supply-side lighting measure 2.", "\"name\": \"sample compete measure r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\":", "metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3])", "test_metrics_ok_point_com(self): \"\"\"Test output given commercial measure with point value inputs.\"\"\" # Initialize test", "21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\":", "# Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify test", "6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "7.32767, 0.01445051]), \"2010\": numpy.array([ 1.670251, 7.32767, 0.01445051])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068,", "[\"2009\", \"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": {", "copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information", "{\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"cost\": { \"stock\": {", "7, 1, 16, 1]), \"2010\": numpy.array([36, 45, 61, 5, 54])}}}, \"carbon\": { \"total\":", "{\"2009\": 30, \"2010\": 30}, \"Cooling\": {\"2009\": 35, \"2010\": 35}}, \"Commercial\": { \"Heating\": {\"2009\":", "\"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", 
"True}}, \"consumer metrics\": False}, { \"stock\": { \"cost savings (total)\": {\"2009\": -5, \"2010\":", "\"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -170}}}}, {", "7\": -0.5}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2,", "\"rate 2\": -95, \"rate 3\": -100, \"rate 4\": -105, \"rate 5\": -110, \"rate", "31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "1.73179114}}, \"competed\": { \"baseline\": {\"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}},", "numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([", "0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.1133333, 0.08222222, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3 = [{ \"savings", "60, \"rate 3\": 70, \"rate 4\": 80, \"rate 5\": 90, \"rate 6\": 100,", "0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "12.5])}}, \"carbon\": { \"savings (total)\": { \"2009\": numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]), \"2010\":", "40.10668])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775,", "\"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas1_dist = { \"name\":", "{ \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": {", "cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\":", "cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to finalize", "0.3845794)]), \"2010\": numpy.array([ 
numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07,", "self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test operation of 'out_break_walk' function. Verify that", "# Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio metrics", "an Engine instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Test that", "numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "adjustments\": { \"market share\": { \"original energy (total captured)\": {}, \"original energy (competed", "{ \"Technical potential\": { \"uncompeted\": True, \"competed\": True}, \"Max adoption potential\": { \"uncompeted\":", "lists of stock cost input values instead of point values. 
compete_meas4 (dict): Sample", "0, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0,", "\"2010\": 150}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6,", "\"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {", "{ \"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": {", "0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0, 0.001808835, 1.920664]), \"2010\": numpy.array([ 0, 0.001808835,", "\"2010\": numpy.array([ -0.09966428, -0.10353592, -0.09523954, -0.10215319, -0.09855809])}, \"ccc\": { \"2009\": numpy.array([ -1.565543e-08, -2.450490e-08,", "cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}},", "Residential sample measure object. attribute_dict (dict): Dict of sample measure attributes. \"\"\" @classmethod", "\"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments for each sample measure", "ok_out_dist2 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that should", "objects. 
measure_master_msegs_out (dict): Master market microsegments that should be generated for each Measure", "11.5, \"2010\": 11}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 69, \"2010\": 66}, \"efficient\":", "overlap_key_scnd (string): Second sample string for secondary market microsegment key chain being tested.", "that should be generated for each Measure object in 'measures_all' following competition and", "20}, \"measure\": {\"2009\": 17.77, \"2010\": 17.77}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10},", "2010, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\":", "1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist3 = { \"stock\": { \"total\": { \"all\":", "{\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\":", "self.ok_out_dist4[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify", "gain', 'existing')) cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1 = { \"name\": \"sample compete", "0.001808835, 1.920664])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]),", "20}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([10, 12, 14])}}, \"competed\": { \"baseline\": {\"2009\":", "string for secondary market microsegment key chain being tested. 
secnd_adj_key (string): Key used", "for key in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test operation of", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 16}, \"efficient\": {\"2009\": 20,", "20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\":", "\"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}, \"competed\": { \"baseline\":", "{\"2009\": numpy.array([ 0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.17, 0.1233333, 0.1488889, 0.09333333,", "\"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -190,", "self.measure_list) # Test that valid input cashflows yield correct output payback values for", "execution (include all lines below this point in all # test files) def", "5\": -390, \"rate 6\": -150, \"rate 7\": -400}, \"2010\": { \"rate 1\": -350,", "\"2010\": 11.11183}}, \"competed\": { \"baseline\": {\"2009\": 11.11183, \"2010\": 11.11183}, \"efficient\": {\"2009\": 0, \"2010\":", "\"2010\": numpy.array([33, 33, 31.5])}}, \"competed\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22,", "(w/ energy cost benefits)\": { \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock", "numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None,", "'cooling', 'supply', 'ASHP', 'existing')) cls.test_htcl_adj = { \"supply\": { \"['AIA_CZ1', 'single family home',", "\"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\":", "10}, \"efficient\": { \"2009\": numpy.array( [5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}},", "= run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict = 
measure_instance.__dict__ def test_attributes(self): \"\"\"Compare object attributes to keys", "{ \"baseline\": { \"2009\": numpy.array([ 22.22366, 22.68455, 20.10668]), \"2010\": numpy.array([ 22.22366, 22.68455, 20.10668])},", "being tested. adjust_key2 (string): Second sample string for competed demand-side and supply-side market", "cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-50, -100, -10])}, \"commercial\":", "20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "numpy.pmt(0.07, 2, 0.4345794), \"2010\": numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"energy", "\"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}, \"efficient\": { \"2009\":", "\"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\":", "1\": numpy.pmt(10.0, 2, -0.4318182), \"rate 2\": numpy.pmt(1.0, 2, -0.125), \"rate 3\": numpy.pmt(0.45, 2,", "self.ok_csave, self.ok_ccostsave) # Test that valid inputs yield correct anpv, irr, payback, and", "4\": 130, \"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160}}}, \"energy cost\":", "2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2,", "i2[x], places=2) # At the terminal/leaf node, formatted as a point value else:", "cases) elif isinstance(i, numpy.ndarray): self.assertTrue(type(i) == type(i2)) for x in range(0, len(i)): self.assertAlmostEqual(i[x],", "\"2010\": 1.29884336}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "{\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"carbon\": { \"total\": {", "6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, 
\"carbon\": { \"total\": { \"baseline\": {", "\"market_entry_year\": 2010, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": {", "numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_out_point_res = [{ \"savings and portfolio metrics\": {", "\"baseline\": {\"2009\": 51, \"2010\": 36}, \"efficient\": {\"2009\": 34, \"2010\": 24}}, \"competed\": { \"baseline\":", "a sample 'uncompeted' # market ('ok_master_mseg_dist2'), the focus of this test suite test_meas", "numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "24])}}, \"competed\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": {", "\"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2,", "-1.139849e-08, -1.146315e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07,", "# Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) # Verify test", "(by climate, building type, structure type). compete_meas1 (dict): Sample commercial supply-side lighting measure", "location in the dict structure, # the keys are equal; this should fail", "energy costs)\": {\"2009\": numpy.array([ 0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]), \"2010\": numpy.array([ 1.9411765, 3.054054,", "{\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 2.59768671, \"2010\": 2.59768671},", "\"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5),", "\"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "dictionaries are not equal. 
\"\"\" # zip() and zip_longest() produce tuples for the", "outputs for ind, x in enumerate(self.ok_out_array): if x is not None: self.assertAlmostEqual(function_output[ind], x,", "master microsegments for each sample measure # following competition/secondary microsegment adjustments for ind,", "\"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\":", "19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "\"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}}, \"lifetime\": { \"baseline\": {\"2009\":", "# identified, where in the case of a dict, the first item #", "the class. test_adopt_scheme (string): Sample consumer adoption scheme. test_htcl_adj (dict): Sample dict with", "self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test outcomes given valid sample measures w/ some", "-0.1247637])}, \"ccc\": { \"2009\": numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08,", "\"rate 7\": -370}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure c2\", \"climate_zone\": [\"AIA_CZ1\"],", "{\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20},", "None}, \"commercial\": { \"2009\": { \"rate 1\": -40, \"rate 2\": -50, \"rate 3\":", "{ \"nested key 1\": [0.5, 0.2, 0.3, 0.4, 0.5], \"nested key 2\": 2},", "\"2009\": 0.2, 
\"2010\": 0.22}}] cls.ok_out_dist1 = [{ \"savings and portfolio metrics\": { \"Technical", "5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\":", "Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the", "{}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": {", "\"secondary\": None}, \"technology\": {\"primary\": [\"F32T8\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\":", "10.5])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 69, \"2010\": numpy.array([66, 66, 63])},", "\"2010\": numpy.array([5, 6, 7])}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": {", "30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 90,", "\"2010\": 100} cls.ok_partitions = { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": .10,", "{ \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\": 50, \"2010\": 100}},", "25}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\":", "with 'measures_supply' Measure objects. 
a_run (object): Analysis engine object incorporating all 'measures_all' objects.", "20}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25,", "self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify", "108.7, 105.1, 105, 106.1])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {", "\"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5,", "18.7, 21.7, 19.2, 20.5]) }}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\":", "\"2010\": -10}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\":", "{\"2009\": 30, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\":", "\"name\": \"sample compete measure r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\":", "51, \"2010\": numpy.array([36, 39, 48])}, \"efficient\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}},", "10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life = 3 cls.ok_product_lifetime = 6.2 cls.ok_life_ratio = 2", "\"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07, 2, 0.6645794),", "{ \"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15},", "\"2009\": numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([ -2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08,", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": 2.59768671, 
\"2010\": 2.59768671}, \"efficient\": { \"2009\":", "run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using test measure,", "\"rate 1\": -90, \"rate 2\": -95, \"rate 3\": -100, \"rate 4\": -105, \"rate", "0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 0.865895571, 0.01085301,", "\"2010\": 1.73179114}, \"efficient\": { \"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": { \"2009\":", "-8.587114e-08, -9.682543e-08, -7.964446e-08, -8.216772e-08, -7.592937e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": {", "benefits)\": { \"2009\": numpy.array([ 0.002333333, 0.002333333, -0.04935749, -0.04935749, -0.0802776]), \"2010\": numpy.array([ -0.021500000, -0.021500000,", "properly accounts for heating and cooling supply-demand overlaps. Attributes: handyvars (object): Useful variables", "\"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([16.04,", "and associated contributing microsegment keys that overlap with 'measures_supply' Measure objects. 
a_run (object):", "\"2010\": 50}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\":", "25, \"2010\": 25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": 30, \"2010\": 30},", "20.82975, 15.17233, 22.48555])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.88650, 10.11489, 14.99037]), \"2010\":", "[\"general service (LED)\"]}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing", "\"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-50, -100, -10])}, \"commercial\": { \"2009\": None, \"2010\":", "\"2009\": numpy.array( [20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\":", "\"2010\": { \"rate 1\": 50, \"rate 2\": 60, \"rate 3\": 70, \"rate 4\":", "outputs. Attributes: handyvars (object): Useful variables across the class. measure_list (list): List for", "22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11, 11, 10.5])}}, \"competed\": { \"baseline\":", "{ \"2009\": -400, \"2010\": -400}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\":", "CommonMethods): \"\"\"Test operation of 'out_break_walk' function. 
Verify that function properly applies a climate", "None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.09917355), \"rate 2\": numpy.pmt(1.0,", "test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine", "self.sample_measure3 = { \"name\": \"sample measure 3 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\":", "5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10},", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 90, \"2010\": 90}, \"efficient\": {\"2009\": 60, \"2010\":", "{\"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": {\"2009\":", "numpy.array([16.04, 17.30, 10.29])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\":", "# Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify test measure results update status", "{\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\":", "-1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}}, { \"anpv\": { \"stock cost\":", "10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}, \"carbon\":", "series of competing commercial measures; and that 'secondary_adj' correctly adjusts any secondary markets", "\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -400, \"2010\": -400},", "measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test output given commercial measure", "22.48555])}}, \"competed\": { \"baseline\": { \"2009\": 
numpy.array([ 13.88650, 10.11489, 14.99037]), \"2010\": numpy.array([ 13.88650,", "\"2010\": -400}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": {", "{ \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([16, 27, 31, 6,", "{ \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2,", "and secondary market microsegments (by climate, building type, structure type). compete_meas1 (dict): Sample", "of demand-side Measure objects and associated contributing microsegment keys that overlap with 'measures_supply'", "90}, \"efficient\": {\"2009\": 60, \"2010\": 60}}, \"competed\": { \"baseline\": {\"2009\": 45, \"2010\": 45},", "6, 7]), \"2010\": numpy.array([5, 6, 7])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "in cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for yr in cls.handyvars.aeo_years}}, }} cls.compete_meas1", "Create an Engine instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Record", "energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas1_dist = { \"name\": \"sample", "\"2009\": 34, \"2010\": numpy.array([24, 26, 32])}}, \"competed\": { \"baseline\": { \"2009\": 25.5, \"2010\":", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas1_dist", "that has missing content; this # value is given as a tuple to", "\"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"baseline\":", "0.5], \"nested key 2\": 2}, \"key 2\": 5.8}}} def test_numpy_convert(self): \"\"\"Test for correct", "yr in cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for yr in cls.handyvars.aeo_years}}, }}", "\"rate 3\": 95, \"rate 4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate", "{\"primary\": \"supply\", 
\"secondary\": None}, \"technology\": {\"primary\": [\"F32T8\"], \"secondary\": None}, \"markets\": { \"Technical potential\":", "{ \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}}, \"competed\": {", "measure # following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind],", "\"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 41.65950, \"2010\":", "31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"cost\": { \"stock\": {", "3\": -100, \"rate 4\": -105, \"rate 5\": -110, \"rate 6\": -115, \"rate 7\":", "14.99037]), \"2010\": numpy.array([ 13.88650, 10.11489, 14.99037])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]),", "string for competed demand-side and supply-side market microsegment key chain being tested. compete_meas1", "None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\":", "2.931068, 0.006743571])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001,", "7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "\"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\":", "0, 0.001808835, 1.920664]), \"2010\": numpy.array([ 0, 0.001808835, 1.920664])}}}, \"energy\": { \"total\": { \"baseline\":", "\"efficient\": { \"2009\": numpy.array([0, 1, 2]), \"2010\": numpy.array([0, 1, 2])}}}, \"energy\": { \"total\":", "instead of point values. compete_meas4 (dict): Sample residential supply-side cooling measure 2. 
compete_meas5", "dict.\"\"\" for key in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test operation", "None, \"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\":", "numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977,", "energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}}", "\"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"cost\":", "'measures_all_dist' objects. measure_master_msegs_out (dict): Master market microsegments that should be generated for each", "cls.measures_overlap2 = { \"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',", "\"2010\": 10}, \"efficient\": { \"2009\": numpy.array( [5, 6, 7]), \"2010\": numpy.array( [5, 6,", "update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1])", "numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}, \"efficient\": { \"2009\": numpy.array([", "\"rate 7\": -0.125}}}, \"energy cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": {", "numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2, 0.5625), \"rate 3\": numpy.pmt(0.45, 2, 0.8739596),", "\"rate 2\": -440, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\": -155, \"rate", "\"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\":", "\"competed\": { \"baseline\": {\"2009\": 0.865895571, 
\"2010\": 0.865895571}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}}, \"energy\": {", "5\": -110, \"rate 6\": -115, \"rate 7\": -120}}}}] # Adjust/finalize point value test", "a sample 'uncompeted' # market ('ok_master_mseg_point'), the focus of this test suite test_meas", "test case, verify correct adoption/competition scenario # keys for measure markets/savings/portfolio metrics for", "for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": {", "# Remove any market overlaps across the supply and demand sides of #", "idx, cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,' and", "overlaps across the supply and demand sides of # heating and cooling self.a_run_dist.htcl_adj(", "= run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset aeo_years cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4", "8.5, \"2010\": 6}}, \"competed\": { \"baseline\": {\"2009\": 8.5, \"2010\": 6}, \"efficient\": {\"2009\": 0,", "[25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\":", "0.1879699, 0.1748252, 0.2840909, 0.1724138]), \"2010\": numpy.array([ 0.2008032, 0.1901141, 0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2 =", "\"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370}}}, \"carbon cost\": { \"residential\":", "\"efficient\": {\"2009\": 45, \"2010\": 45}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\":", "Measure objects. a_run_dist (object): Engine object incorporating all 'measures_all_dist' objects. 
measure_master_msegs_out (dict): Master", "{\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17.77, \"2010\": 17.77}}, \"competed\": { \"all\": {\"2009\":", "20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array( [15, 16, 17]), \"2010\": numpy.array( [15,", "= run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # For first test case, verify correct", "CZ2\": { \"Residential\": { \"Heating\": {\"2009\": 30, \"2010\": 30}, \"Cooling\": {\"2009\": 35, \"2010\":", "engine_instance = run.Engine(self.handyvars, self.measure_list) # Test that valid input cashflows yield correct output", "aeo_years cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme =", "42.68455, 40.10668])}, \"efficient\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341,", "8.886499}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300,", "sample 'uncompeted' # market ('ok_master_mseg_dist2'), the focus of this test suite test_meas =", "object. attribute_dict (dict): Dict of sample measure attributes. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define", "\"total\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\":", "= [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary =", "\"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "measure 1 including lists of stock cost input values instead of point values.", "-60, \"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75}}}}, { \"stock cost\":", "6\": 110, \"rate 7\": 115}, \"2010\": { \"rate 1\": 85, \"rate 2\": 90,", "0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091)]),", "\"cost savings (total)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3,", "-3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}}, {", "engine \"\"\" # Import code to be tested import run # Import needed", "\"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2,", "savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist1' with a", "on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure", "\"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"cost\": { \"stock\":", "\"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\":", "4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 
0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"]", "numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}, \"carbon cost\": { \"residential\": {\"2009\": None, \"2010\":", "15}, \"efficient\": { \"2009\": numpy.array( [15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7,", "\"2009\": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}},", "23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}}, \"lifetime\": { \"baseline\":", "# to the normal output from zip_longest() fill_val = ('substituted entry', 5.2) #", "not yet reached the terminal/leaf node if isinstance(i, dict): # Test that the", "\"efficient\": { \"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\":", "= { \"name\": \"sample measure 3 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None,", "numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"carbon\": { \"total\": {", "\"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 50, \"2010\": 50}, \"cost", "2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None,", "{ \"2009\": numpy.array([1.73, 0.02, 9.60]), \"2010\": numpy.array([1.73, 0.02, 9.60])}}, \"competed\": { \"all\": {\"2009\":", "0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"cost\": { \"stock\": { \"total\": {", "-4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}, \"cost savings (annual)\": {", "market shares and updates master microsegments for a series of competing residential measures;", "run.Engine(self.handyvars, self.measure_list) 
# Record the output for the test run of the 'metric_update'", "\"affected savings\": { yr: 5 for yr in cls.handyvars.aeo_years}}, }, \"demand\": { \"['AIA_CZ1',", "numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}}, \"irr (w/", "0.2, \"2010\": 0.22}}] cls.ok_out_point_com = [{ \"savings and portfolio metrics\": { \"Technical potential\":", "{\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"carbon\": { \"total\": {", "input uncertainty test cases) elif isinstance(i, numpy.ndarray): self.assertTrue(type(i) == type(i2)) for x in", "-50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": {", "-0.125}}}, \"energy cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": {", "\"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113,", "0.1748252, 0.2840909, 0.1724138]), \"2010\": numpy.array([ 0.2008032, 0.1901141, 0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2 = [{", "below as a # substitute in the dict that has missing content; this", "{ \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\":", "6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"cost\": { \"stock\": { \"total\": {", "dict, the first item # in the tuple is the key and the", "cls.test_adopt_scheme = \"Max adoption potential\" cls.overlap_key = str( ('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)',", "w/ point value inputs.\"\"\" # Run measure competition routine on sample measures self.a_run.compete_com_primary(", "Confirm that at the current location in the dict structure, # the keys", "0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])}, \"payback", "6}}}, \"carbon\": { 
\"total\": { \"baseline\": {\"2009\": 51, \"2010\": 36}, \"efficient\": {\"2009\": 34,", "{ \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])},", "numpy.array([66, 66, 63])}, \"efficient\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}}, \"competed\": {", "\"cost savings (total)\": {\"2009\": -5, \"2010\": -10}, \"cost savings (annual)\": {\"2009\": -5, \"2010\":", "'existing')) cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1 = { \"name\": \"sample compete measure", "\"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5,", "7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "{ \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\":", "{ \"2009\": numpy.array([ 13.02227, 13.64868, 10.14500]), \"2010\": numpy.array([ 13.02227, 13.64868, 10.14500])}, \"efficient\": {", "{\"2009\": 34.5, \"2010\": 33}}, \"competed\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\":", "\"2010\": 10.55592}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 22.22366, \"2010\": 22.22366},", "compete_meas4 (dict): Sample residential supply-side cooling measure 2. compete_meas5 (dict): Sample residential supply-side", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 22.22366, 22.68455, 20.10668]), \"2010\":", "= { \"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling',", "data. test_adopt_scheme (string): Sample consumer adoption scheme. ok_rate (float): Sample discount rate. 
ok_master_mseg_point", "focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point", "cls.compete_meas3 = { \"name\": \"sample compete measure r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family", "\"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\":", "\"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\":", "\"mseg_out_break\": {}}}} cls.compete_meas4 = { \"name\": \"sample compete measure r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "\"measure\": { \"2009\": numpy.array([17.77, 10.23, 19.98]), \"2010\": numpy.array([17.77, 10.23, 19.98])}}, \"competed\": { \"all\":", "\"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "(w/ energy costs)\": {\"2009\": numpy.array([ 3.370236, 6.877566, 4.335205, 4.218185, 3.081800]), \"2010\": numpy.array([ 5.345834,", "'assembly', 'existing')) cls.compete_meas1 = { \"name\": \"sample compete measure c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 8}}}, \"energy\": { \"total\": { \"baseline\":", "for yr in cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for yr in cls.handyvars.aeo_years}},", "2}} cls.ok_master_mseg_dist2 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20},", "energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.measures_all", "30, \"2010\": 30}, \"efficient\": { \"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": {", "with these primary market microsegments. 
Attributes: handyvars (object): Useful variables across the class.", "and the second item is the value; # in the case where the", "(w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\":", "with point value inputs.\"\"\" # Initialize test measure and assign it a sample", "\"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}}", "{ \"baseline\": { \"2009\": 5, \"2010\": numpy.array([8.0, 7.5, 6.5])}, \"efficient\": { \"2009\": 10,", "costs)\": { \"2009\": numpy.array([ 3.648926, 3.737086, 3.956335, 3.180956, 2.886001]), \"2010\": numpy.array([ 2.425032, 2.584709,", "'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist = { \"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single", "\"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\",", "\"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([", "-1.035359e-07, -9.523954e-08, -1.021532e-07, -9.855809e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\":", "\"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting',", "0, \"2010\": 0}}, \"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}}", "-160, \"rate 7\": -370}, \"2010\": { \"rate 1\": -435, \"rate 2\": -440, \"rate", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\":", "be tested import run # Import needed packages import unittest import numpy import", "\"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using test measure, run function on", "numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 
0.5145794), numpy.pmt(0.07, 2, 0.3845794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2,", "\"sample compete measure c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\":", "{\"2009\": 11.5, \"2010\": 11}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 23,", "\"2010\": numpy.array([18, 15, 9])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([12, 10,", "{ \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"carbon\": { \"total\": {", "{ \"baseline\": {\"2009\": 90, \"2010\": 90}, \"efficient\": {\"2009\": 60, \"2010\": 60}}, \"competed\": {", "{\"2009\": 0, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 5}}},", "0.08222222, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3 = [{ \"savings and portfolio metrics\": { \"Technical", "cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.handyvars.retro_rate = 0 cls.test_adopt_scheme =", "10.22977, 19.98073]), \"2010\": numpy.array([ 17.77300, 10.22977, 19.98073])}, \"efficient\": { \"2009\": numpy.array([ 8.886499, 5.114887,", "(dict): Sample commercial supply-side lighting measure 2. 
compete_meas3 (dict): Sample commercial supply-side lighting", "self.test_htcl_adj) # Run the measure competition routine on sample supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist,", "{ \"2009\": 0, \"2010\": numpy.array([12, 10, 6])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6,", "energy and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_point_com = [{ \"savings", "@classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\" base_dir =", "0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])}, \"payback (w/ energy", "this should fail if one of the dicts # is empty, is missing", "\"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "\"savings (annual)\": {\"2009\": 100, \"2010\": 100}, \"cost savings (total)\": {\"2009\": 10, \"2010\": 15},", "given residential measure with array inputs.\"\"\" # Initialize test measure and assign it", "\"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure3 = { \"name\":", "5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\":", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'multi family", "\"2010\": 44}}, \"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\":", "{ \"stock cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": {", "item # in the tuple is the key and the second item is", "\"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"energy cost\": { \"residential\": { \"2009\":", "\"2010\": 1.73179114}, \"efficient\": {\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\":", "{ \"stock cost\": { \"residential\": 
{ \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2,", "\"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-150, -200, -100])}, \"commercial\": { \"2009\": None, \"2010\":", "1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503,", "{ \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure( cls.handyvars, **x) for", "# Run the measure competition routine on sample demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1,", "self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test operation of 'out_break_walk' function. Verify that function properly", "numpy.array([6, 7, 1, 16, 1]), \"2010\": numpy.array([36, 45, 61, 5, 54])}}}, \"carbon\": {", "10, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": {", "\"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "{ \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"carbon\": {", "27.29736, 20.29000])}, \"efficient\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302,", "# Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify test", "0.02119408]), \"2010\": numpy.array([ 2.227001, 10.25874, 0.02119408])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.670251,", "\"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\":", "{\"2009\": 10.55592, \"2010\": 10.55592}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 22.22366,", "Verify that 
'compete_res_primary' correctly calculates primary market shares and updates master microsegments for", "numpy.array([16, 27, 31, 6, 51]), \"2010\": numpy.array([106, 95, 81, 11, 124])}}, \"competed\": {", "Sample measure->baseline lifetime ratio. ok_base_scost (int): Sample baseline stock cost. ok_scostsave (int): Sample", "14.65534, 0.02890102])}, \"efficient\": { \"2009\": numpy.array([ 2.227001, 10.25874, 0.02119408]), \"2010\": numpy.array([ 2.227001, 10.25874,", "{}}}} self.sample_measure4 = { \"name\": \"sample measure 4\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\":", "lighting measure 3. compete_meas_dist (dict): Alternative version of sample commercial supply-side lighting measure", "(competed and captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {", "sample 'uncompeted' # market ('ok_master_mseg_dist3'), the focus of this test suite test_meas =", "(total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (competed and captured)\":", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 20}}, \"competed\":", "supply-demand overlap adjustments. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all", "microsegment including stock cost array. 
ok_master_mseg_dist3 (dict): Sample measure master microsegment including measure", "5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\":", "0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3 # Create Engine instance using test measure,", "output given residential measure with array inputs.\"\"\" # Initialize test measure and assign", "\"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07, 2,", "results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"],", "\"2010\": numpy.array([0, 1.5, 2.6])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\":", "test measure consumer metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist", "Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) # Verify test measure", "'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\": { \"total\":", "adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys and values\": {},", "-8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": {\"2009\": None, \"2010\":", "0.5145794), numpy.pmt(0.07, 2, 0.3845794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07,", "\"\"\"Test output given residential measure with point value 
inputs.\"\"\" # Initialize test measure", "\"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.02, 8.65, 5.14]), \"2010\": numpy.array([8.02, 8.65, 5.14])}}}, \"energy\":", "-4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc (w/ energy cost benefits)\":", "-2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": {", "{ \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.9040091), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\":", "30}, \"efficient\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15},", "microsegment adjustments on sample measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated", "{\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"all\": {\"2009\":", "[0.5, 0.2, 0.3, 0.4, 0.5], \"nested key 2\": 2}, \"key 2\": 5.8}}} def", "\"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\": { \"total\":", "18}}, \"competed\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}},", "21.34227, 20.05334]), \"2010\": numpy.array([ 21.11183, 21.34227, 20.05334])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114,", "{ \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([10, 12,", "cls.compete_meas3_dist = { \"name\": \"sample compete measure r3 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single", "adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase, 
CommonMethods):", "yr in cls.handyvars.aeo_years}}, }, \"demand\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\":", "2.2, 4.6])}} cls.ok_out_point_res = [{ \"savings and portfolio metrics\": { \"Technical potential\": {", "(list): Outputs that should be generated for each set of sample cash flows.", "44.97110]), \"2010\": numpy.array([ 41.65950, 30.34466, 44.97110])}, \"efficient\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]),", "in enumerate(self.ok_out_array): if x is not None: self.assertAlmostEqual(function_output[ind], x, places=2) else: self.assertEqual(function_output[ind], x)", "5, 2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.2009346),", "(grid)', 'cooling', 'demand', 'windows', 'existing')) cls.adjust_key2 = str( ('primary', 'AIA_CZ1', 'single family home',", "\"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\":", "0.3), \"rate 5\": numpy.pmt(0.15, 2, 0.3695652), \"rate 6\": numpy.pmt(0.065, 2, 0.4389671), \"rate 7\":", "\"rate 2\": -140, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\": -155, \"rate", "k2 are the keys that correspond to # the dicts or unitary values", "'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2 = { \"measures\":", "\"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 2.23, \"2010\":", "1}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\":", "numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736,", "2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}},", "market microsegments (by climate, building type, structure type). 
compete_meas1 (dict): Sample commercial supply-side", "\"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 30, \"2010\":", "-2.715319e-08, -5.525120e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.028667e-08, -4.740667e-08, -8.600937e-08,", "Measure objects with array inputs. measures_demand_dist (list): Demand-side subset of 'measures_all_dist'. measures_supply_dist (list):", "\"rate 6\": -160, \"rate 7\": -170}}}}, { \"stock cost\": { \"residential\": { \"2009\":", "correctly initiated. Attributes: sample_measure (object): Residential sample measure object. attribute_dict (dict): Dict of", "0.9582496), numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07, 2, 2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "class PaybackTest(unittest.TestCase): \"\"\"Test the operation of the 'payback' function. Verify cashflow input generates", "34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}},", "0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}},", "self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd,", "20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 5,", "\"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"cost\": { \"stock\":", "numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07,", "\"2010\": 4.09}, \"payback (w/ energy costs)\": { \"2009\": 0.25, \"2010\": 0.33}, \"payback (w/", "\"competed\": True}}, \"consumer 
metrics\": False}, { \"stock\": { \"cost savings (total)\": { \"2009\":", "\"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 16},", "type). compete_meas1 (dict): Sample commercial supply-side lighting measure 1. compete_meas2 (dict): Sample commercial", "List of all competing measures with point value inputs. measures_secondary (list): Subset of", "residential sample measure. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all", "\"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\":", "commercial supply-side lighting measure 2. compete_meas3 (dict): Sample commercial supply-side lighting measure 3.", "yr: 5 for yr in cls.handyvars.aeo_years}}, }, \"demand\": { \"['AIA_CZ1', 'single family home',", "\"efficient\": {\"2009\": 40, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\":", "100, \"rate 7\": 110}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "10}, \"measure\": {\"2009\": 8.89, \"2010\": 8.89}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300,", "\"2010\": 11}}, \"competed\": { \"baseline\": {\"2009\": 11.5, \"2010\": 11}, \"efficient\": {\"2009\": 0, \"2010\":", "{ \"residential\": { \"2009\": 100, \"2010\": 100}, \"commercial\": { \"2009\": None, \"2010\": None}},", "-0.01407333, -0.05267604, -0.05230731, -0.07946463]), \"2010\": numpy.array([ -0.047715000, -0.05520500, -0.09523954, -0.10215319, -0.13025120])}, \"ccc\": {", "10, \"2010\": numpy.array([0, 1.5, 2.6])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0,", "numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07, 2, 1.130011)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\":", "-0.09331291]), \"2010\": numpy.array([ -0.1140346, -0.11474490, -0.09371098, -0.072742925, -0.11206083])}, \"ccc\": { \"2009\": numpy.array([ 
-1.608851e-08,", "cooling measure 1. compete_meas1_dist (dict): Alternative version of sample residential demand-side cooling measure", "numpy.pmt(0.065, 2, 0.2042254), \"rate 7\": -0.125}}}, \"energy cost\": { \"residential\": {\"2009\": None, \"2010\":", "engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes)", "cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max adoption potential' cls.ok_rate = 0.07 cls.ok_master_mseg_point =", "4\": 130, \"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160}, \"2010\": {", "\"2009\": 0, \"2010\": numpy.array([24, 20, 12])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([18, 15,", "30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 15,", "{}}}} cls.compete_meas4 = { \"name\": \"sample compete measure r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single", "8.886499, \"2010\": 8.886499}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "\"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 17.77300, 10.22977, 19.98073]), \"2010\": numpy.array([", "\"2009\": numpy.array( [0, 1, 2]), \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\":", "compared dict2 (dict): Second dictionary to be compared Raises: AssertionError: If dictionaries are", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\":", "is missing section(s), or has different key names self.assertEqual(k, k2) # If the", "148.9]), \"2010\": numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])}, \"savings (annual)\": { \"2009\": 
numpy.array([49.4, 42.3,", "numpy.pmt(0.07, 2, 1.130011)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }},", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\":", "objects. measures_overlap (dict): List of supply-side Measure objects and associated contributing microsegment keys", "1.699537), numpy.pmt(0.07, 2, 1.582016)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)", "\"2010\": 30}, \"measure\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\":", "\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\":", "0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1,", "\"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2,", "cls.handyvars.aeo_years}}, }, \"demand\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": { yr:", "**self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4 # Create Engine instance using test measure, run", "valid input cashflows yield correct output payback values for idx, cf in enumerate(self.ok_cashflows):", "{\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\":", "numpy.array([ 0.2392344, 0.2347418, 0.2242152, 0.2659574, 0.2857143]), \"2010\": numpy.array([ 0.3344482, 0.3194888, 0.3533569, 0.3472222, 0.3636364])},", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\": 34.5, \"2010\":", "handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars, **sample_measure)] cls.a_run =", "Import code to be tested import run # Import needed packages import unittest", 
"{ \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": numpy.array([ 0, 1,", "\"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for yr in cls.handyvars.aeo_years}}, }, \"demand\": {", "\"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None},", "{ \"Heating\": {\"2009\": 40, \"2010\": 40}, \"Cooling\": {\"2009\": 45, \"2010\": 45}}}} def test_ok(self):", "\"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\": 25.5, \"2010\": 18}}, \"competed\":", "-1 cls.ok_esave = 7.5 cls.ok_ecostsave = 0.5 cls.ok_csave = 50 cls.ok_ccostsave = 1", "32.01341, 30.08001])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114,", "\"name\": \"sample measure 1\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\":", "\"2009\": -200, \"2010\": -200}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": {", "baseline->measure stock cost delta. ok_esave (int): Sample measure energy savings. ok_ecostsave (int): Sample", "\"Cooling\": {\"2009\": 35, \"2010\": 35}}, \"Commercial\": { \"Heating\": {\"2009\": 40, \"2010\": 40}, \"Cooling\":", "5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([1.00, 1.00, 3.45, 3.45,", "**self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1 # Create Engine instance using test measure, run", "-1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08, 1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc (w/ energy cost", "variables across the class. 
sample_measure_res (object): Sample residential measure data. sample_measure_com (object): Sample", "18.7, 21.7, 21.2, 22.5])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}}", "key chain being tested. secnd_adj_key (string): Key used to link primary and secondary", "\"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 2, 4])}}}, \"energy\": { \"total\": { \"baseline\":", "-150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -170}, \"2010\": { \"rate", "cost\": { \"residential\": { \"2009\": -200, \"2010\": -200}, \"commercial\": { \"2009\": None, \"2010\":", "x is not None: self.assertAlmostEqual(function_output[ind], x, places=2) else: self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase): \"\"\"Test", "{ \"baseline\": { \"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}},", "class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'metrics_update' function. Verify that cashflow", "{ \"2009\": 0, \"2010\": numpy.array([8.0, 7.5, 6.5])}}}, \"energy\": { \"total\": { \"baseline\": {", "\"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]),", "\"2010\": 1.29884336}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0.432947785, \"2010\":", "2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": {", "16.04455, 17.29736, 10.29000])}, \"efficient\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273,", "{ \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": {", "assign it a sample 'uncompeted' # market ('ok_master_mseg_dist4'), the focus of this test", "class. measure_list (list): List for Engine including one sample residential measure. 
ok_num_units (int):", "\"\"\"Test outcomes given valid sample measures w/ some array inputs.\"\"\" # Run the", "\"efficient\": { \"2009\": numpy.array( [25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7,", "{\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0.87, \"2010\": 0.87}}}, \"energy\": { \"total\": {", "{}}}} self.sample_measure3 = { \"name\": \"sample measure 3 (commercial)\", \"active\": 1, \"market_entry_year\": None,", "should be generated given 'ok_master_mseg_dist4' with a residential sample measure. \"\"\" @classmethod def", "31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "1.29884336, \"2010\": 1.29884336}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"cost\": { \"stock\": { \"total\":", "-3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([ -2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}}, { \"anpv\":", "numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]), \"2010\": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])}, \"payback (w/", "\"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 0, \"2010\":", "valid inputs.\"\"\" dict1 = self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2 = self.ok_out self.dict_check(dict1, dict2) class", "savings (total)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3,", "residential demand-side cooling measure 2. 
compete_meas3 (dict): Sample residential supply-side cooling measure 1.", "5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\":", "\"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\":", "use across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate", "\"rate 6\": 120, \"rate 7\": 125}, { \"rate 1\": 105, \"rate 2\": 110,", "numpy.array([ 5.345834, 7.580577, 3.931585, 6.612039, 4.915578])}, \"irr (w/ energy and carbon costs)\": {\"2009\":", "41.65950, \"2010\": 41.65950}, \"efficient\": {\"2009\": 27.77300, \"2010\": 27.77300}}, \"competed\": { \"baseline\": {\"2009\": 20.82975,", "\"rate 7\": 135}])}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4 # Create Engine instance using test measure,", "\"rate 1\": numpy.pmt(10.0, 2, 0.04958678), \"rate 2\": numpy.pmt(1.0, 2, 0.375), \"rate 3\": numpy.pmt(0.45,", "carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_point_com = [{ \"savings and portfolio", "of competed units. ok_base_life (int): Sample baseline technology lifetime. 
ok_product_lifetime (float): Sample measure", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\":", "None}, \"commercial\": { \"2009\": { \"rate 1\": -435, \"rate 2\": -440, \"rate 3\":", "\"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}}, \"lifetime\":", "\"2010\": 16}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\":", "-230, \"rate 7\": -200}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "numpy.array([22, 22, 21])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\":", "22.5])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\":", "5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]), \"2010\":", "{ \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": 15}},", "cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist = {", "8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}},", "Adjust/finalize point value test measure consumer metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv']", "\"2010\": 31.66775}}, \"competed\": { \"baseline\": {\"2009\": 21.11183, \"2010\": 21.11183}, \"efficient\": {\"2009\": 10.55592, \"2010\":", "(competed and captured)\": {} }}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {},", "carbon cost benefits)\": { \"2009\": numpy.array([ 0.003046667, -0.01407333, -0.05267604, -0.05230731, -0.07946463]), \"2010\": numpy.array([", "measure 4\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, 
\"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\":", "{\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"cost\": { \"stock\": {", "0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}, cls.overlap_key_scnd: { \"rate distribution\": {}}}, \"secondary mseg", "\"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary',", "\"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 30, \"2010\": 30}}, \"competed\":", "1.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251,", "\"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test output given residential measure with point value inputs.\"\"\" #", "2.00, 4.09, 4.09, 4.50])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([0.50, 0.50, 0.25, 0.25,", "w/ some array inputs.\"\"\" # Run the measure competition routine on sample demand-side", "self.test_htcl_adj) # Check updated competed master microsegments for each sample measure # following", "(competed and captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {},", "savings (annual)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3,", "{ \"2009\": { \"rate 1\": 50, \"rate 2\": 60, \"rate 3\": 70, \"rate", "{ \"2009\": numpy.array([ 41.65950, 30.34466, 44.97110]), \"2010\": numpy.array([ 41.65950, 30.34466, 44.97110])}, \"efficient\": {", "17.30, 10.29]), \"2010\": numpy.array([16.04, 17.30, 10.29])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10},", "{ \"rate 1\": -135, \"rate 2\": -140, \"rate 3\": -145, \"rate 4\": -150,", "{ \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.11, \"2010\": 11.11}}}, \"energy\": {", "test measure consumer-level metrics self.dict_check(engine_instance.measures[ 
0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test output given commercial", "10.67114, 10.02667])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 22.22366,", "\"efficient\": { \"2009\": numpy.array( [5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}, \"competed\":", "overlap with 'measures_supply' Measure objects. a_run (object): Analysis engine object incorporating all 'measures_all'", "5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}},", "17, \"2010\": 12}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.5,", "energy costs)\": {\"2009\": numpy.array([ 0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.34, 0.2466667,", "10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5},", "2\": numpy.pmt(1.0, 2, -0.125), \"rate 3\": numpy.pmt(0.45, 2, 0.01724138), \"rate 4\": numpy.pmt(0.25, 2,", "25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}},", ".15}}, \"Commercial\": { \"Heating\": {\"2009\": .20, \"2010\": .20}, \"Cooling\": {\"2009\": .25, \"2010\": .25}}},", "\"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 5}}}, \"carbon\":", "\"baseline\": { \"2009\": numpy.array([ 39.06682, 40.94604, 30.43499]), \"2010\": numpy.array([ 39.06682, 40.94604, 30.43499])}, \"efficient\":", "15}}}, { \"cce\": {\"2009\": -0.01602415, \"2010\": -0.01111353}, \"cce (w/ carbon cost benefits)\": {", "\"rate 5\": numpy.pmt(0.15, 2, 0.1521739), \"rate 6\": numpy.pmt(0.065, 2, 0.2042254), \"rate 7\": -0.125}}},", "# Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test output", "\"efficient\": { \"2009\": 10, 
\"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": {", "microsegments that should be generated for each Measure object in 'measures_all_dist' following competition", "2\": 5.8}}} def test_numpy_convert(self): \"\"\"Test for correct function output given valid input.\"\"\" #", "baseline technology lifetime. ok_product_lifetime (float): Sample measure lifetime. ok_life_ratio (int): Sample measure->baseline lifetime", "\"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": \"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\": None,", "numpy.array([16, 15, 13])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\":", "{ \"2009\": 5, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "{ \"baseline\": { \"2009\": numpy.array([ 2.59768671, 0.02713253, 14.40498233]), \"2010\": numpy.array([ 2.59768671, 0.02713253, 14.40498233])},", "\"rate 1\": -350, \"rate 2\": -60, \"rate 3\": -70, \"rate 4\": -380, \"rate", "\"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"windows\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\":", "57.7, 58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}}, \"cost\": {", "-0.01602415, -0.04694426]), \"2010\": numpy.array([ 0.05350000, 0.05350000, -0.01111353, -0.01111353, -0.04976366])}, \"cce (w/ carbon cost", "None, \"2010\": None}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr", "3.340502, 14.65534, 0.02890102])}, \"efficient\": { \"2009\": numpy.array([ 2.227001, 10.25874, 0.02119408]), \"2010\": numpy.array([ 2.227001,", "6.943250, 5.057443, 7.495183])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 41.65950, 30.34466,", "\"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0,", 
"measure c2 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\",", "def main(): \"\"\"Trigger default behavior of running all test fixtures in the file.\"\"\"", "{\"2009\": 45, \"2010\": 45}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\":", "0.865895571}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "15}, \"measure\": {\"2009\": 11.5, \"2010\": 11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46,", "20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"cost\": { \"stock\": { \"total\":", "19.98])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.89, 5.11,", "\"measure\": {\"2009\": 22.22, \"2010\": 22.22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\":", "cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on sample", "compete_meas1 (dict): Sample commercial supply-side lighting measure 1. compete_meas2 (dict): Sample commercial supply-side", "test measure, run function on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\")", "2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}},", "energy (competed and captured)\": {}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed", "be generated given 'ok_master_mseg_dist3' with a residential sample measure. 
ok_out_dist4 (dict): Measure attribute", "-3.7, -6.7, -4.2, -5.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2,", "10.55592, 10.67114, 10.02667])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 63.33550, 64.02682,", "{}, \"mseg_adjust\": { \"contributing mseg keys and values\": {}, \"competed choice parameters\": {},", "2, 1.130011)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"carbon", "\"2010\": 1.73}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0.87, \"2010\":", "{ \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\": 50, \"2010\": 100}}, \"competed\": {", "\"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\":", "numpy.array( [25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}},", "{}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3 = {", "\"\"\"Test for correct outputs given valid inputs.\"\"\" # Create an Engine instance using", "{\"2009\": 35, \"2010\": 35}}, \"Commercial\": { \"Heating\": {\"2009\": 40, \"2010\": 40}, \"Cooling\": {\"2009\":", "5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.10e-08,", "\"2010\": 10}, \"measure\": {\"2009\": 8.5, \"2010\": 6}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "{\"2009\": 0, \"2010\": 0}}, \"adjusted energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\":", "{ \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -150, \"2010\":", "\"stock\": { \"total\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\":", "66}, \"efficient\": {\"2009\": 46, \"2010\": 44}}, \"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\": 33},", "(dict): Sample residential 
supply-side cooling measure 2. compete_meas5 (dict): Sample residential supply-side cooling", "9.990366])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499,", "2, 0.4909346), numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}},", "\"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}}, \"competed\": { \"baseline\":", "measure master microsegment including measure lifetime array. ok_master_mseg_dist4 (dict): Sample measure master microsegment", "{ \"2009\": numpy.array([ -0.04898876, -0.05783823, -0.05267604, -0.05230731, -0.04751385]), \"2010\": numpy.array([ -0.09966428, -0.10353592, -0.09523954,", "x in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2] cls.measures_supply =", "Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify test measure portfolio-level financial", "\"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\": 34.5, \"2010\": 33}}, \"competed\":", "\"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75}}}}, { \"stock cost\": {", "0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\",", "\"2010\": numpy.array([6, 5, 3])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\":", "31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"cost\": { \"stock\": { \"total\":", "\"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}, \"efficient\": { \"2009\":", "node, formatted as a numpy array # 
(for input uncertainty test cases) elif", "\"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist1 = { \"stock\":", "benefits)\": { \"2009\": numpy.array([ -0.04898876, -0.05783823, -0.05267604, -0.05230731, -0.04751385]), \"2010\": numpy.array([ -0.09966428, -0.10353592,", "\"savings (total)\": { \"2009\": numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]), \"2010\": numpy.array([199.4, 191.3, 194.9,", "\"2010\": -0.01111353}, \"cce (w/ carbon cost benefits)\": { \"2009\": -0.04935749, \"2010\": -0.08611353}, \"ccc\":", "1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {", "numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07, 2, 2.079221)]), \"2010\":", "8}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0,", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\":", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\":", "numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None,", "numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([2.00,", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\":", "costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr (w/ energy and carbon costs)\": {", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\":", "2\": -440, \"rate 3\": -145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\":", "the measure competition routine on sample demand-side measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme) #", "\"2009\": 25.5, \"2010\": numpy.array([18, 19.5, 24])}}, 
\"competed\": { \"baseline\": { \"2009\": 17, \"2010\":", "\"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\":", "4\": numpy.pmt(0.25, 2, 0.72), \"rate 5\": numpy.pmt(0.15, 2, 0.8128544), \"rate 6\": numpy.pmt(0.065, 2,", "\"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}}, \"competed\": { \"baseline\":", "all competing/interacting sample Measure objects with point value inputs. measures_demand (list): Demand-side subset", "[ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set", "\"total\": {}}}, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure c3\", \"climate_zone\":", "Sample discount rate. ok_master_mseg_point (dict): Sample measure master microsegment including all point values", "output given valid input.\"\"\" # Instantiate measure measure_instance = run.Measure(self.handyvars, **self.sample_measure) # Test", "15, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15,", "{ \"rate 1\": numpy.pmt(10.0, 2, -0.4090909), \"rate 2\": numpy.pmt(1.0, 2, 0), \"rate 3\":", "\"2010\": 150}, \"efficient\": {\"2009\": 0, \"2010\": 50}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 0.865895571, \"2010\": 0.865895571}}, \"competed\": { \"baseline\": {\"2009\":", "22.48555])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}},", "numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07,", "numpy.array([ 20.82975, 15.17233, 22.48555])}, 
\"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([", "60.16002])}, \"efficient\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}},", "Sample measure master microsegment including all point values at terminal leaf nodes. ok_master_mseg_dist1", "7\": -370}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", "{\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 45, \"2010\": 45}}, \"competed\": { \"baseline\": {\"2009\":", "105, 106.1])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15},", "20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10},", "}}}, \"mseg_out_break\": {}}}} class CommonMethods(object): \"\"\"Define common methods for use in all tests", "\"2010\": 11}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{", "Sample commercial supply-side lighting measure 1. compete_meas2 (dict): Sample commercial supply-side lighting measure", "True}}, \"consumer metrics\": False}, { \"stock\": { \"cost savings (total)\": { \"2009\": numpy.array([-5.1,", "numpy.array([ 27.77300, 20.22977, 29.98073])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]),", "30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1,", "\"supply\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": { yr: 10 for", "Sample measure master microsegment including stock cost array. ok_master_mseg_dist3 (dict): Sample measure master", "of sample input cash flows. 
ok_out (list): Outputs that should be generated for", "array test measure consumer # metrics consumer_metrics_final_dist = [{ \"stock cost\": { \"residential\":", "\"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"cost\": { \"stock\":", "point value inputs.\"\"\" # Initialize test measure and assign it a sample 'uncompeted'", "7\": -200}, \"2010\": { \"rate 1\": -190, \"rate 2\": -195, \"rate 3\": -190,", "{\"2009\": 5, \"2010\": 5}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "\"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\":", "\"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": { \"2009\":", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176,", "7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0,", "[\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"],", "list engine_instance = run.Engine(self.handyvars, self.measure_list) # Test that valid input cashflows yield correct", "{}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\":", "{ \"supply\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": { yr: 10", "Engine instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Test that valid", "scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95},", "finalize array test measure consumer # metrics consumer_metrics_final_dist = [{ \"stock cost\": {", "# For first test case, verify correct adoption/competition scenario # keys 
for measure", "'AIA_CZ1', 'assembly', 'electricity (grid)', 'lighting', 'reflector (LED)', 'existing')) cls.overlap_key_scnd = str( ('secondary', 'AIA_CZ1',", "cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist", "0}}, \"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption", "33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"cost\": { \"stock\":", "20.82975}}, \"competed\": { \"baseline\": {\"2009\": 13.88650, \"2010\": 13.88650}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}},", "(grid)', 'cooling', 'demand', 'lighting gain', 'existing')) cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing')) cls.compete_meas1 =", "-1.111353e-08, -4.976366e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08,", "26, 32])}, \"efficient\": { \"2009\": 25.5, \"2010\": numpy.array([18, 19.5, 24])}}, \"competed\": { \"baseline\":", "engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, self.ok_csave, self.ok_ccostsave) # Test that", "'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run = run.Engine(cls.handyvars,", "<reponame>NREL/scout<gh_stars>0 #!/usr/bin/env python3 \"\"\" Tests for running the engine \"\"\" # Import code", "7])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}},", "\"efficient\": {\"2009\": 19.53341, \"2010\": 19.53341}}, \"competed\": { \"baseline\": {\"2009\": 13.02227, \"2010\": 13.02227}, \"efficient\":", "\"supply\"}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": 
[\"general service (LED)\"]},", "point value inputs. measures_demand (list): Demand-side subset of 'measures_all'. measures_supply (list): Supply-side subset", "15}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\":", "40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 31.66775,", "9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.29884336,", "\"all\": {\"2009\": 5, \"2010\": 10}, \"measure\": {\"2009\": 5, \"2010\": 10}}}, \"energy\": { \"total\":", "3.081800]), \"2010\": numpy.array([ 5.345834, 7.580577, 3.931585, 6.612039, 4.915578])}, \"irr (w/ energy and carbon", "{ \"Heating\": {\"2009\": 30, \"2010\": 30}, \"Cooling\": {\"2009\": 35, \"2010\": 35}}, \"Commercial\": {", "\"ccc (w/ energy cost benefits)\": { \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": {", "\"2010\": 0}}, \"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max", "2.147181])}, \"irr (w/ energy and carbon costs)\": { \"2009\": numpy.array([ 4.713113, 4.884221, 5.309580,", "0.4909346), numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy", "numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"cost\": { \"stock\": {", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -90, \"rate 2\": -95,", "import os class CommonTestMeasures(object): \"\"\"Class of common sample measures for tests. 
Attributes: sample_measure", "\"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "\"2010\": 10}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"carbon\": { \"total\": { \"baseline\":", "self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across the supply and", "[0, 1, 2]), \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\":", "measures_all_dist (list): List including competing/interacting sample Measure objects with array inputs. measures_demand_dist (list):", "Reset aeo_years cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme", "10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 40,", "10, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\":", "\"baseline\": { \"2009\": numpy.array([ 13.88650, 10.11489, 14.99037]), \"2010\": numpy.array([ 13.88650, 10.11489, 14.99037])}, \"efficient\":", "{ \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975, \"2010\": 20.82975}},", "inputs.\"\"\" # Create an Engine instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list)", "{ \"2009\": numpy.array([ 63.33550, 64.02682, 60.16002]), \"2010\": numpy.array([ 63.33550, 64.02682, 60.16002])}, \"efficient\": {", "should be generated given valid sample inputs. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables", "\"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}, \"efficient\": { \"2009\":", "{ \"2009\": numpy.array([17.77, 10.23, 19.98]), \"2010\": numpy.array([17.77, 10.23, 19.98])}}, \"competed\": { \"all\": {\"2009\":", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 16.04,", "Sample measure avoided carbon costs. ok_out_dicts (list): Output annuity equivalent Net Present Value", "numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07,", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure2", "\"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07,", "cls.measures_all) # Set information needed to finalize array test measure consumer # metrics", "that function properly applies a climate zone/building type/end use partition to a total", "0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 2, 0.3845794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07,", "1.29884336, 0.01356626, 7.20249116])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\":", "# Confirm that at the current location in the dict structure, # the", "{ \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 20, \"2010\": 20}}, \"competed\": {", "-2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.232209e-08,", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\":", "3.45, 
\"2010\": 2.44}, \"irr (w/ energy and carbon costs)\": { \"2009\": 4.54, \"2010\":", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([16.04, 17.30, 10.29]),", "11.0, 10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_com(self): \"\"\"Test", "and associated contributing microsegment keys that overlap with 'measures_demand_dist' Measure objects. measures_overlap2_dist (dict):", "False}, { \"stock\": { \"cost savings (total)\": {\"2009\": -5, \"2010\": -10}, \"cost savings", "11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}}, \"competed\": { \"baseline\": { \"2009\":", "\"2010\": 150}, \"efficient\": { \"2009\": numpy.array([6, 7, 1, 16, 1]), \"2010\": numpy.array([36, 45,", "\"sample compete measure r1 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\":", "finalize point value test measure # consumer metrics consumer_metrics_final = [{ \"stock cost\":", "{ cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": {", "-10])}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": {", "{ \"baseline\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])},", "-10}, \"cost savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": {\"2009\":", "\"secondary\": None}, \"technology\": {\"primary\": [\"general service (CFL)\"], \"secondary\": None}, \"markets\": { \"Technical potential\":", "external code execution (include all lines below this point in all # test", "technology lifetime. ok_product_lifetime (float): Sample measure lifetime. 
ok_life_ratio (int): Sample measure->baseline lifetime ratio.", "}}}, \"mseg_out_break\": {}}}} self.sample_measure5 = { \"name\": \"sample measure 5 (commercial)\", \"active\": 1,", "15, \"2010\": 15}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\":", "5\": -155, \"rate 6\": -160, \"rate 7\": -370}, \"2010\": { \"rate 1\": -435,", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001,", "all class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure", "copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1 = { \"measures\": cls.measures_all[2:5],", "\"irr (w/ energy and carbon costs)\": { \"2009\": numpy.array([ 4.713113, 4.884221, 5.309580, 2.908860,", "20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}},", "\"Technical potential\": { \"uncompeted\": True, \"competed\": True}, \"Max adoption potential\": { \"uncompeted\": False,", "\"efficient\": {\"2009\": 1.113501, \"2010\": 1.113501}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\":", "\"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 51, \"2010\":", "\"2010\": numpy.array([ 2.425032, 2.584709, 2.240438, 2.298386, 2.147181])}, \"irr (w/ energy and carbon costs)\":", "self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2])", "{\"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 
30, \"2010\": 30},", "measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[", "inputs yield correct anpv, irr, payback, and # cost of conserved energy/carbon outputs", "5}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": {", "measure consumer metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist =", "[10, 4, 7, 8, 10], [-100, 0, 1]] cls.ok_out = [5.14, 0.71, 6.5,", "2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"carbon\": { \"total\": { \"baseline\": {", "of all competing measures with point value inputs. measures_secondary (list): Subset of 'measures_all'", "30, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\":", "\"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -150,", "# Adjust/finalize point value test measure consumer metrics for ind, m in enumerate(cls.a_run_dist.measures):", "benefits)\": { \"2009\": numpy.array([ -0.0396936, -0.04452961, -0.05150073, -0.006204243, -0.09331291]), \"2010\": numpy.array([ -0.1140346, -0.11474490,", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 17, \"2010\": numpy.array([12, 13,", "4.80])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\":", "\"energy cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 1.808018), \"2010\": numpy.pmt(0.07, 2, 1.356014)},", "\"2009\": 5, \"2010\": numpy.array([8.0, 7.5, 6.5])}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0, 1.5,", "\"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array(", "{ \"savings (total)\": { \"2009\": 
numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]), \"2010\": numpy.array([199.4, 191.3,", "\"2010\": -1.111353e-08}, \"ccc (w/ energy cost benefits)\": { \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, {", "measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 41.65950, 30.34466, 44.97110]), \"2010\": numpy.array([", "{ \"2009\": numpy.array([ -0.01565543, -0.02450490, -0.01934271, -0.01897398, -0.01418052]), \"2010\": numpy.array([ -0.02466428, -0.02853592, -0.02023954,", "\"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]),", "tested. compete_meas1 (dict): Sample residential demand-side cooling measure 1. compete_meas1_dist (dict): Alternative version", "30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 30,", "\"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity", "30, \"2010\": 30}, \"efficient\": { \"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": {", "150, \"rate 7\": 160}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571,", "'demand', 'windows', 'existing'))]]} cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize", "function. 
Verify that measure master microsegment inputs yield expected savings and financial metrics", "{\"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\":", "\"carbon cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-50, -100, -10])},", "terminal/leaf node lists in a dict to numpy arrays. Attributes: handyvars (object): Useful", "-3.7, -6.7, -4.2, -5.5])}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings", "(competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2", "115}, \"2010\": { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95, \"rate", "100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure r2\", \"climate_zone\": [\"AIA_CZ1\"],", "\"2009\": 0, \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": {", "20, \"2010\": 8}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 8}, \"efficient\": {\"2009\": 10,", "\"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\":", "object based on above measures cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed", "= { \"name\": \"sample compete measure c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": {", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {", "\"2010\": 2.23}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 1.11, \"2010\":", "{ \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": { \"2009\":", "20}, \"Cooling\": {\"2009\": 25, \"2010\": 25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\":", "given valid inputs.\"\"\" dict1 = self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2 = self.ok_out 
self.dict_check(dict1, dict2)", "63])}, \"efficient\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}}, \"competed\": { \"baseline\": {", "\"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "4.884221, 5.309580, 2.908860, 5.394281]), \"2010\": numpy.array([ 4.601286, 4.897553, 4.260683, 4.367373, 4.089454])}, \"payback (w/", "Measure attribute update status, savings, and portfolio/consumer-level financial metrics that should be generated", "Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test output given", "'compete_com_primary' and 'secondary_adj' functions. Verify that 'compete_com_primary' correctly calculates primary market shares and", "1\": -90, \"rate 2\": -95, \"rate 3\": -100, \"rate 4\": -105, \"rate 5\":", "[20, 21, 22])}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": {", "captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure2 = { \"name\": \"sample measure 2\", \"active\":", "1\": 205, \"rate 2\": 100, \"rate 3\": 105, \"rate 4\": 110, \"rate 5\":", "20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 20,", "2], [10, 4, 7, 8, 10], [-100, 0, 1]] cls.ok_out = [5.14, 0.71,", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 41.65950, 30.34466, 44.97110]), \"2010\": numpy.array([ 41.65950,", "115}, { \"rate 1\": 205, \"rate 2\": 100, \"rate 3\": 105, \"rate 4\":", "inputs.\"\"\" # Initialize test measure and assign it a sample 'uncompeted' # market", "measures_overlap1 (dict): List of supply-side Measure objects and associated contributing microsegment keys that", "data with lists to convert. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use", "20}, \"measure\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10},", "0.1724138]), \"2010\": numpy.array([ 0.2008032, 0.1901141, 0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2 = [{ \"savings and", "5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.022273,", "0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346)])", "first item # in the tuple is the key and the second item", "\"2010\": numpy.array([ 0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"] def test_metrics_ok_point_res(self):", "test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4 # Create Engine instance using test measure, run function", "110, \"rate 3\": 120, \"rate 4\": 130, \"rate 5\": 140, \"rate 6\": 150,", "4.54, 5.00]), \"2010\": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])}, \"payback (w/ energy costs)\": {\"2009\":", "{ \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0,", "[20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\":", "{ \"2009\": 0, \"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\":", "\"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) # Reset aeo_years", "{ \"residential\": { \"2009\": numpy.array([95, 100, 90]), \"2010\": numpy.array([95, 100, 90])}, \"commercial\": {", "{\"2009\": 15, \"2010\": 15}}, \"Commercial\": { 
\"Heating\": {\"2009\": 20, \"2010\": 20}, \"Cooling\": {\"2009\":", "0}}, \"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 =", "-5.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1,", "}}}, \"mseg_out_break\": {}}}} self.sample_measure2 = { \"name\": \"sample measure 2\", \"active\": 1, \"market_entry_year\":", "numpy.array([0, 1, 2]), \"2010\": numpy.array([0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "competition routine on sample measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment", "and 'secondary_adj' functions. Verify that 'compete_com_primary' correctly calculates primary market shares and updates", "array # (for input uncertainty test cases) elif isinstance(i, numpy.ndarray): self.assertTrue(type(i) == type(i2))", "9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.886499,", "{ \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "\"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": None}, \"markets\": { \"Technical", "50}, \"cost savings (total)\": {\"2009\": 5, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 5,", "carbon costs)\": {\"2009\": numpy.array([ 0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.1133333, 0.08222222,", "{\"2009\": 10, \"2010\": 16}, \"efficient\": {\"2009\": 20, \"2010\": 8}}, \"competed\": { \"baseline\": {\"2009\":", "\"2010\": 15}, \"measure\": {\"2009\": 15, \"2010\": 15}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "\"2010\": 0}}, \"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2", "None, 
\"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\",", "6\": -230, \"rate 7\": -200}, \"2010\": { \"rate 1\": -190, \"rate 2\": -195,", "-180, \"rate 6\": -230, \"rate 7\": -200}, \"2010\": { \"rate 1\": -190, \"rate", "{ \"contributing mseg keys and values\": { cls.adjust_key2: { \"stock\": { \"total\": {", "{ cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": {", "consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test output given commercial measure with", "6\": -230, \"rate 7\": -200}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\":", "\"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ -0.01565543, -0.02450490, -0.01934271, -0.01897398, -0.01418052]), \"2010\":", "\"2010\": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])}, \"cost savings (total)\": { \"2009\": numpy.array([4.9, 5.3,", "{\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([1.73, 0.02, 9.60]), \"2010\": numpy.array([1.73, 0.02,", "\"2009\": numpy.array([8.02, 8.65, 5.14]), \"2010\": numpy.array([8.02, 8.65, 5.14])}}}, \"energy\": { \"total\": { \"baseline\":", "captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist = {", "cost benefits)\": { \"2009\": -0.04935749, \"2010\": -0.08611353}, \"ccc\": {\"2009\": -1.602415e-08, \"2010\": -1.111353e-08}, \"ccc", "including one sample residential measure. 
ok_cashflows (list): Set of sample input cash flows.", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self):", "energy and carbon costs)\": {\"2009\": numpy.array([ 1.941176, 4.555556, 5.647891, 5.501689, 4.543007]), \"2010\": numpy.array([", "CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max adoption potential' cls.ok_rate = 0.07 cls.ok_master_mseg_point = { \"stock\":", "\"efficient\": {\"2009\": 42.22366, \"2010\": 42.22366}}, \"competed\": { \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\":", "converts terminal/leaf node lists in a dict to numpy arrays. Attributes: handyvars (object):", "CZ1\": { \"Residential\": { \"Heating\": {\"2009\": .10, \"2010\": .10}, \"Cooling\": {\"2009\": .15, \"2010\":", "engine object incorporating all 'measures_primary' objects. measures_all_dist (list): List of competing measures including", "0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.670251, 7.816181, 0.01637724]), \"2010\": numpy.array([ 1.670251, 7.816181, 0.01637724])}},", "metrics consumer_metrics = [{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 16.04455, 17.29736, 10.29000]), \"2010\":", "{ \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07, 2,", "5.647891, 5.501689, 4.082098]), \"2010\": numpy.array([ 8.446248, 11.795815, 6.327488, 10.343948, 7.801544])}, \"payback (w/ energy", "annuity equivalent Net Present Value dicts that should be generated given valid sample", "95}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\":", "13.02227, 13.64868, 10.14500])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), 
\"2010\": numpy.array([ 6.511136,", "-1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}, \"cost savings (annual)\": { \"2009\":", "\"efficient\": {\"2009\": 30, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "\"efficient\": { \"2009\": 0, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": {", "nodes. ok_master_mseg_dist1 (dict): Sample measure master microsegment including energy, carbon, and energy/carbon cost", "for use across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())", "tested_data[\"key 1\"][\"nested key 1\"], tested_data[\"key 1\"][\"nested key 2\"], tested_data[\"key 2\"]], [numpy.ndarray, int, float])]))", "of sample measure attributes. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across", "25}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 10}, \"measure\": {\"2009\": 5, \"2010\": 10}}},", "savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"],", "\"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"carbon\":", "\"original energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"original energy (competed", "None}}, \"energy cost\": { \"residential\": { \"2009\": -200, \"2010\": -200}, \"commercial\": { \"2009\":", "\"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -400,", "any market overlaps across the supply and demand sides of # heating and", "150}, \"efficient\": {\"2009\": 50, \"2010\": 100}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "\"2009\": numpy.array([22.22, 22.68, 20.11]), \"2010\": numpy.array([22.22, 
22.68, 20.11])}}, \"competed\": { \"all\": {\"2009\": 15,", "distribution\": { \"2009\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4], \"2010\": [", "20, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10,", "15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "0.87}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {\"2009\": 1.29884336,", "cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate", "\"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.03566667, 0.03566667, -0.01602415, -0.01602415, -0.04694426]), \"2010\":", "potential\": { \"key 1\": { \"nested key 1\": [0.5, 0.2, 0.3, 0.4, 0.5],", "{\"2009\": numpy.array([ 0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.34, 0.2466667, 0.2233333, 0.14,", "measure r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None},", "2.1, 2.2, 4.6])}} cls.ok_out_point_res = [{ \"savings and portfolio metrics\": { \"Technical potential\":", "numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 5, 2.837211)]), \"2010\":", "106.1])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7,", "microsegments for each sample measure # following competition/secondary microsegment adjustments for ind, d", "30}, \"measure\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15},", "{ \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}, \"competed\": {", "consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{", 
"numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200},", "(total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 50, \"2010\": 50}, \"cost savings", "\"rate 6\": -115, \"rate 7\": -120}, \"2010\": { \"rate 1\": -90, \"rate 2\":", "numpy.array([ numpy.pmt(0.07, 1, -0.51), numpy.pmt(0.07, 1, -0.27), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794),", "{ \"2009\": 10, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\":", "(total)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2,", "\"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist2 = { \"stock\":", "8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "\"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\":", "zip_longest() will use the fill value created below as a # substitute in", "the measure competition routine on sample supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) #", "Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify test measure portfolio-level financial", "0 cls.test_adopt_scheme = \"Max adoption potential\" cls.adjust_key1 = str( ('primary', 'AIA_CZ1', 'single family", "measures_overlap1_dist (dict): List of supply-side Measure objects and associated contributing microsegment keys that", "(annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ -0.01565543, -0.02450490, -0.01934271,", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 20}}, \"competed\": {", "numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07, 2, 1.582016)]) }, 
\"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\":", "competition routine on sample supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) # Remove any", "{\"2009\": 13.88650, \"2010\": 13.88650}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"carbon\": { \"total\": {", "of the dicts # is empty, is missing section(s), or has different key", "numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}, \"efficient\": { \"2009\": numpy.array([0,", "{ \"total\": { yr: 10 for yr in cls.handyvars.aeo_years}, \"total affected\": { yr:", "11.0, 10.5])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 69, \"2010\": numpy.array([66, 66,", "\"residential\": { \"2009\": -50, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, {", "[\"2009\", \"2010\"] cls.handyvars.retro_rate = 0 cls.test_adopt_scheme = \"Max adoption potential\" cls.adjust_key1 = str(", "195.0, 193.9])}, \"savings (annual)\": { \"2009\": numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]), \"2010\": numpy.array([49.4,", "{\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\":", "\"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5},", "\"2009\": -400, \"2010\": -400}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": {", "\"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"cost\":", "{\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array([20, 21,", "portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist3' with a residential sample", "\"2009\": { \"rate 1\": -435, \"rate 2\": -440, \"rate 3\": -145, \"rate 4\":", "costs)\": {\"2009\": numpy.array([0.33, 0.33, 0.20, 
0.20, 0.20]), \"2010\": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}]", "(dict): Sample residential supply-side cooling measure 1. compete_meas3_dist (dict): Alternative version of sample", "{ \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0,", "cls.compete_meas3 = { \"name\": \"sample compete measure c3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\":", "'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist = {", "(w/ energy costs)\": { \"2009\": 0.25, \"2010\": 0.33}, \"payback (w/ energy and carbon", "in a dict to numpy arrays. Attributes: handyvars (object): Useful variables across the", "10.23, 19.98]), \"2010\": numpy.array([17.77, 10.23, 19.98])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10},", "numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([", "self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2])", "be generated for each set of sample cash flows. 
\"\"\" @classmethod def setUpClass(cls):", "20, \"2010\": numpy.array([10, 12, 14])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\":", "31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "\"rate 4\": numpy.pmt(0.25, 2, 0.1), \"rate 5\": numpy.pmt(0.15, 2, 0.1521739), \"rate 6\": numpy.pmt(0.065,", "8.0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])},", "1}, \"sub-market scaling\": 1}, cls.overlap_key_scnd: { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "first test case, verify correct adoption/competition scenario # keys for measure markets/savings/portfolio metrics", "numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654), None, None, None, 0.62, 1.59, 2, 0.67,", "5, \"2010\": 5}, \"measure\": {\"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\":", "'convert_to_numpy' function. Verify that the function converts terminal/leaf node lists in a dict", "numpy.array([ 0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}]", "numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 1, 0.9345794), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07,", "generated given 'ok_master_mseg_point' with a residential sample measure. 
ok_out_point_com (dict): Measure attribute update", "numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\":", "\"2010\": 24}, \"efficient\": {\"2009\": 25.5, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 17, \"2010\":", "96])}, \"cost savings (total)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9,", "cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))],", "in self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) #", "current level of the recursive # exploration of dict1 and dict2, respectively for", "setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\" base_dir = os.getcwd() handyvars", "\"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\":", "0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3 = [{ \"savings and portfolio metrics\": { \"Technical potential\":", "master microsegments for a series of competing commercial measures; and that 'secondary_adj' correctly", "-200}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\":", "values\": {}, \"competed choice parameters\": {}, \"secondary mseg adjustments\": { \"market share\": {", "Verify that function properly applies a climate zone/building type/end use partition to a", "\"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160}, \"2010\": { \"rate 1\":", "numpy.array( [5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": {", "30, \"2010\": 30}, \"efficient\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": 
{\"2009\": 15,", "3.180956, 2.886001]), \"2010\": numpy.array([ 2.425032, 2.584709, 2.240438, 2.298386, 2.147181])}, \"irr (w/ energy and", "= { \"name\": \"sample measure 1\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\":", "10}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40,", "carbon, and energy/carbon cost arrays. ok_master_mseg_dist2 (dict): Sample measure master microsegment including stock", "\"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}, \"carbon\": { \"total\": {", "-0.007691022, -0.01262901])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.0396936, -0.04452961, -0.05150073,", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3_dist", "\"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46, \"2010\":", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366},", "\"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\":", "7\": 115}, { \"rate 1\": 205, \"rate 2\": 100, \"rate 3\": 105, \"rate", "\"efficient\": {\"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": {\"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\":", "\"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years = [\"2009\",", "\"nested key 1\": [0.5, 0.2, 0.3, 0.4, 0.5], \"nested key 2\": 2}, \"key", "for measure markets/savings/portfolio metrics for adopt_scheme in self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted( 
engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes)", "{ \"name\": \"sample measure 5 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\":", "\"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (competed and captured)\": {", "{ \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2',", "[\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\":", "numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\": { \"2009\": numpy.repeat(None,", "17.30, 10.29])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.02,", "measures_overlap (dict): List of supply-side Measure objects and associated contributing microsegment keys that", "{\"2009\": 51, \"2010\": 36}, \"efficient\": {\"2009\": 34, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\":", "{\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([16, 27, 31, 6, 51]), \"2010\":", "1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.028667e-08,", "residential demand-side cooling measure 1. 
compete_meas1_dist (dict): Alternative version of sample residential demand-side", "22.5])}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array( [25.1,", "# test files) def main(): \"\"\"Trigger default behavior of running all test fixtures", "None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -50, \"2010\": -50}, \"commercial\":", "2, 0.4245794), numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 2,", "1.670251, 7.32767, 0.01445051]), \"2010\": numpy.array([ 1.670251, 7.32767, 0.01445051])}, \"efficient\": { \"2009\": numpy.array([ 0.5567503,", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 22.22366, \"2010\": 22.22366}, \"efficient\": {\"2009\": 11.11183, \"2010\":", "3.931585, 6.612039, 4.915578])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 4.442382, 8.824726,", "financial metrics that should be generated given 'ok_master_mseg_point' with a residential sample measure.", "(w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\":", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\":", "{ \"rate distribution\": {}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy", "5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "1}}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\":", "1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"cost\": { \"stock\": { \"total\":", "61, 5, 54])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\":", "\"baseline\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}, \"efficient\":", 
"any secondary markets associated with these primary market microsegments. Attributes: handyvars (object): Useful", "50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}, \"competed\": { \"baseline\": {\"2009\":", "2, 0.4345794), \"2010\": numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"energy cost\":", "90, \"rate 3\": 95, \"rate 4\": 100, \"rate 5\": 105, \"rate 6\": 110,", "i2) # Continue to recursively traverse the dict self.dict_check(i, i2) # At the", "{ \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.5),", "(int): Sample baseline stock cost. ok_scostsave (int): Sample baseline->measure stock cost delta. ok_esave", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.59768671, 0.02713253, 14.40498233]), \"2010\": numpy.array([ 2.59768671,", "\"2009\": 23, \"2010\": numpy.array([22, 22, 21])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15},", "1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45,", "'existing')) cls.compete_meas1 = { \"name\": \"sample compete measure c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"],", "\"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34, \"2010\":", "test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class MetricUpdateTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation", "\"measure\": {\"2009\": 0, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\":", "and captured)\": {}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": {", "4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370}, \"2010\": {", "\"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10},", "\"rate 4\": -150, \"rate 5\": -155, 
\"rate 6\": -160, \"rate 7\": -170}, \"2010\":", "\"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\":", "consumer adoption scheme. overlap_key (string): First sample string for competed primary market microsegment", "terminal/leaf node, formatted as a point value else: self.assertAlmostEqual(i, i2, places=2) class TestMeasureInit(unittest.TestCase):", "{ \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.04958678), \"rate 2\": numpy.pmt(1.0, 2, 0.375),", "\"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_res(self): \"\"\"Test outcomes given", "{}}}} cls.compete_meas2 = { \"name\": \"sample compete measure c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"],", "20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}, \"efficient\": { \"2009\": numpy.array([ 6.943250,", "2, 0.4459346), numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 2,", "10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\":", "numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"cost\": { \"stock\": {", "numpy.array([5, 6, 7])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": {", "associated contributing microsegment keys that overlap with 'measures_supply_dist' Measure objects. 
a_run_dist (object): Engine", "1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": { \"2009\":", "= [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist =", "5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2,", "os class CommonTestMeasures(object): \"\"\"Class of common sample measures for tests. Attributes: sample_measure (dict):", "7]), \"2010\": numpy.array( [5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "-370}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "\"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\":", "be generated given 'ok_master_mseg_dist1' with a residential sample measure. ok_out_dist2 (dict): Measure attribute", "compete_meas5 (dict): Sample residential supply-side cooling measure 3. measures_all (list): List of all", "6.722325])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571,", "the dicts are not of identical size, # zip_longest() will use the fill", "1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}}, \"competed\": { \"baseline\": { \"2009\":", "\"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": {", "\"2010\": 25}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 10}, \"measure\": {\"2009\": 5, \"2010\":", "point values. 
measures_all (list): List of all competing measures with point value inputs.", "6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}}, { \"cce\": { \"2009\":", "-5, \"2010\": -10}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\":", "\"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.0396936, -0.04452961, -0.05150073, -0.006204243, -0.09331291]),", "\"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160}}}, \"energy cost\": { \"residential\":", "# metrics consumer_metrics = [{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\":", "40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1},", "\"2010\": 1}, \"measure\": 1}}] def test_compete_com(self): \"\"\"Test outcomes given sample measures w/ point", "0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\":", "residential measure. ok_num_units (int): Sample number of competed units. 
ok_base_life (int): Sample baseline", "\"2010\": numpy.array([6, 5, 3])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "\"2010\": 10.55592}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 63.33550, \"2010\": 63.33550}, \"efficient\": {\"2009\":", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 2.23, \"2010\": 2.23}}, \"competed\": {", "\"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -40,", "-0.01934271, -0.01897398, -0.01418052]), \"2010\": numpy.array([ -0.02466428, -0.02853592, -0.02023954, -0.02715319, -0.02355809])}, \"cce (w/ carbon", "\"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}, \"2010\": { \"rate 1\":", "numpy.pmt(1.0, 2, 0.5625), \"rate 3\": numpy.pmt(0.45, 2, 0.8739596), \"rate 4\": numpy.pmt(0.25, 2, 1.08),", "shares and updates master microsegments for a series of competing commercial measures; and", "{ \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 5}}},", "List for Engine including one sample residential measure. 
ok_num_units (int): Sample number of", "\"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"carbon\":", "19.5, 24])}}, \"competed\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\":", "\"2010\": { \"rate 1\": -435, \"rate 2\": -440, \"rate 3\": -145, \"rate 4\":", "(w/ energy and carbon costs)\": { \"2009\": 4.54, \"2010\": 4.09}, \"payback (w/ energy", "test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist2'), the focus", "4, 5], \"nested key 2\": 5}, \"key 2\": 10.8}, \"Max adoption potential\": {", "\"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 30, \"2010\":", "def test_compete_com(self): \"\"\"Test outcomes given sample measures w/ point value inputs.\"\"\" # Run", "\"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "{\"2009\": 10, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\":", "and zip_longest() produce tuples for the items # identified, where in the case", "(w/ energy costs)\": {\"2009\": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]), \"2010\": numpy.array([0.67, 0.67, 0.33,", "\"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091),", "\"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "Net Present Value dicts that should be generated given valid sample inputs. 
ok_out_array", "\"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"cost\":", "{} }}}, \"mseg_out_break\": {}}}} self.sample_measure3 = { \"name\": \"sample measure 3 (commercial)\", \"active\":", "\"2009\": 100, \"2010\": 100}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": {", "\"2009\": numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08, -8.216772e-08,", "{ \"baseline\": { \"2009\": numpy.array([ 17.77300, 10.22977, 19.98073]), \"2010\": numpy.array([ 17.77300, 10.22977, 19.98073])},", "\"yrs_on_mkt\": [\"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": {", "\"baseline\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}, \"efficient\":", "\"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1,", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 45,", "numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455,", "70, \"rate 4\": 80, \"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110},", "microsegment adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase,", "3\": 105, \"rate 4\": 110, \"rate 5\": 115, \"rate 6\": 120, \"rate 7\":", "adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "{ \"total\": { \"baseline\": {\"2009\": 69, \"2010\": 66}, \"efficient\": {\"2009\": 46, \"2010\": 44}},", "{\"2009\": 10, \"2010\": 10}, 
\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "1.29884336, \"2010\": 1.29884336}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "7.7e-10, -9.2e-9] def test_metric_updates(self): \"\"\"Test for correct outputs given valid inputs.\"\"\" # Create", "{\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "{}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key2: { \"2009\": 0, \"2010\": 0}}, \"total\":", "content; this # value is given as a tuple to be of comparable", "20, \"2010\": 20}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([16, 15, 13])}}, \"competed\": {", "9.60])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([0.87, 0.01,", "110}, \"2010\": { \"rate 1\": 50, \"rate 2\": 60, \"rate 3\": 70, \"rate", "{}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure5 = {", "\"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 11.11183, 11.34227,", "measure master microsegment inputs yield expected savings and financial metrics outputs. 
Attributes: handyvars", "2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07,", "\"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}}, \"irr (w/ energy costs)\":", "{ \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": 5}}, \"competed\": {", "cls.adjust_key1 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows',", "-155, \"rate 6\": -160, \"rate 7\": -370}, \"2010\": { \"rate 1\": -435, \"rate", "\"\"\"Trigger default behavior of running all test fixtures in the file.\"\"\" unittest.main() if", "numpy.array([20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\":", "sample demand-side measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across", "{ \"2009\": numpy.array([22.22, 22.68, 20.11]), \"2010\": numpy.array([22.22, 22.68, 20.11])}}, \"competed\": { \"all\": {\"2009\":", "5}, \"key 2\": 10.8}, \"Max adoption potential\": { \"key 1\": { \"nested key", "5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 0.9607843, 2.703704, 4.335205,", "None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -150, \"2010\": -150}, \"commercial\":", "measures_all (list): List of all competing/interacting sample Measure objects with point value inputs.", "supply-demand overlap adjustments. measure_master_msegs_out_dist (dict): Master market microsegments that should be generated for", "'ok_master_mseg_point' with a residential sample measure. 
ok_out_dist1 (dict): Measure attribute update status, savings,", "24}}, \"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}},", "numpy.array([ 0, 0.001808835, 1.920664])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114,", "{ \"2009\": numpy.array([2.23, 9.77, 0.02]), \"2010\": numpy.array([2.23, 9.77, 0.02])}}, \"competed\": { \"all\": {\"2009\":", "141.9, 150.0, 148.9]), \"2010\": numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])}, \"savings (annual)\": { \"2009\":", "generated given 'ok_master_mseg_dist4' with a residential sample measure. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define", "{\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([8.0, 7.5, 6.5])}}}, \"energy\":", "[\"competed\", \"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test output given residential measure with point value inputs.\"\"\"", "{ \"Technical potential\": { \"key 1\": { \"nested key 1\": [1, 2, 3,", "0.50, 2.44, 2.44, 2.99])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([2.00, 2.00,", "# substitute in the dict that has missing content; this # value is", "-1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc (w/", "50, \"2010\": 100}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\":", "compete measure r1 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"],", "base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars,", "{ \"Heating\": {\"2009\": 10, \"2010\": 10}, \"Cooling\": {\"2009\": 15, \"2010\": 15}}, \"Commercial\": {", "os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) 
sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life", "(list): Supply-side subset of 'measures_all'. measures_overlap1 (dict): List of supply-side Measure objects and", "adjustments on sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed", "measure lifetime. ok_life_ratio (int): Sample measure->baseline lifetime ratio. ok_base_scost (int): Sample baseline stock", "for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist = [run.Measure( cls.handyvars, **x)", "\"competed\": { \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}}, \"lifetime\":", "17.29736, 10.29000]), \"2010\": numpy.array([ 16.04455, 17.29736, 10.29000])}, \"efficient\": { \"2009\": numpy.array([ 8.022273, 8.648681,", "lists to convert. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all", "\"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366,", "numpy.pmt(1.0, 2, 0.75), \"rate 3\": numpy.pmt(0.45, 2, 1.165279), \"rate 4\": numpy.pmt(0.25, 2, 1.44),", "{ \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 1.29884336, \"2010\": 1.29884336}},", "90]), \"2010\": numpy.array([95, 100, 90])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\":", "0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0, 0.001808835, 1.920664]), \"2010\": numpy.array([ 0,", "across all class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure =", "numpy.array([ 13.88650, 10.11489, 14.99037]), \"2010\": numpy.array([ 13.88650, 10.11489, 14.99037])}, \"efficient\": { \"2009\": numpy.array([", "numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}}, \"competed\": { \"baseline\": {", "microsegment adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self):", "1\": 85, \"rate 2\": 90, \"rate 3\": 95, \"rate 4\": 100, \"rate 5\":", "\"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6, 6.5, 8])}}, \"competed\": { \"baseline\": { \"2009\":", "6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "\"rate 4\": 80, \"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110}}}, \"energy", "of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3 #", "8.5, \"2010\": 6}}}, \"energy\": { 
\"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\":", "\"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "13.88650}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 41.65950,", "\"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}, \"competed\":", ".15, \"2010\": .15}}, \"Commercial\": { \"Heating\": {\"2009\": .20, \"2010\": .20}, \"Cooling\": {\"2009\": .25,", "\"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = {", "for adopt_scheme in self.handyvars.adopt_schemes: for comp_scheme in [\"uncompeted\", \"competed\"]: tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme]", "\"2009\": 34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0,", "correct outputs given valid inputs.\"\"\" # Create an Engine instance using sample_measure list", "7.580577, 3.931585, 6.612039, 4.915578])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 4.442382,", "\"measure\": {\"2009\": 8.02, \"2010\": 8.02}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\":", "\"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"cost\": { \"stock\":", "10.22977, 19.98073])}, \"efficient\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887,", "self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test output given commercial measure with point value", "1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336,", "\"uncompeted\") # Verify test measure results update status 
self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) # Verify", "= { \"name\": \"sample measure 4\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\":", "{ \"2009\": numpy.array([ 22.22366, 22.68455, 20.10668]), \"2010\": numpy.array([ 22.22366, 22.68455, 20.10668])}, \"efficient\": {", "14])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\":", "cls.measures_secondary = [cls.measures_all[1]] # Instantiate engine object based on above measures cls.a_run =", "Master market microsegments that should be generated for each Measure object in 'measures_all_dist'", "4\": -205, \"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200}, \"2010\": {", "\"2010\": 10}, \"Cooling\": {\"2009\": 15, \"2010\": 15}}, \"Commercial\": { \"Heating\": {\"2009\": 20, \"2010\":", "that cashflow inputs generate expected prioritization metric outputs. Attributes: handyvars (object): Useful variables", "below this point in all # test files) def main(): \"\"\"Trigger default behavior", "'ok_master_mseg_dist4' with a residential sample measure. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for", "26.04455, 27.29736, 20.29000])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\":", "\"adjusted energy (competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\":", "0, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": {", "needed packages import unittest import numpy import copy import itertools import os class", "1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 20,", "Sample consumer adoption scheme. test_htcl_adj (dict): Sample dict with supply-demand overlap data. adjust_key1", "measures; and that 'htcl_adj' properly accounts for heating and cooling supply-demand overlaps. 
Attributes:", "0.3472222, 0.3636364])}, \"payback (w/ energy and carbon costs)\": { \"2009\": numpy.array([ 0.1937984, 0.1879699,", "Outputs that should be generated for each set of sample cash flows. \"\"\"", "family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing')) cls.test_htcl_adj = { \"supply\": {", "5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30},", "0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "{ \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 15}}, \"competed\": {", "{ \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys and values\":", "10, \"2010\": 10}, \"measure\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"energy\": {", "measure 2. compete_meas5 (dict): Sample residential supply-side cooling measure 3. measures_all (list): List", "adjust_key2 (string): Second sample string for competed demand-side and supply-side market microsegment key", "{} }}}, \"mseg_out_break\": {}}}} class CommonMethods(object): \"\"\"Define common methods for use in all", "\"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\":", "cost\": { \"residential\": { \"2009\": -150, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\":", "0.9040091), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}}, \"irr (w/ energy", "\"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"general service (CFL)\"], \"secondary\": None}, \"markets\":", "2, 0.9582496), numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07, 2, 2.079221)]), \"2010\": numpy.array([", "2.298386, 2.147181])}, \"irr (w/ energy and carbon costs)\": { \"2009\": numpy.array([ 4.713113, 4.884221,", "7]), \"2010\": numpy.array([5, 6, 7])}}}, 
\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array( [5, 6, 7]), \"2010\": numpy.array( [5,", "\"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([-150,", "numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([0,", "\"2009\": 10, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5,", "numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07,", "compete_meas1 (dict): Sample residential demand-side cooling measure 1. compete_meas1_dist (dict): Alternative version of", "\"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}}, \"competed\": { \"baseline\":", "x) class PaybackTest(unittest.TestCase): \"\"\"Test the operation of the 'payback' function. Verify cashflow input", "\"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0,", "125}, { \"rate 1\": 105, \"rate 2\": 110, \"rate 3\": 115, \"rate 4\":", "-0.01084246, -0.01014934, -0.007691022, -0.01262901])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.0396936,", "\"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\":", "4\": -380, \"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400}}}, \"carbon cost\":", "(string): Sample consumer adoption scheme. 
overlap_key (string): First sample string for competed primary", "{ \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4090909), \"rate 2\": numpy.pmt(1.0, 2, 0),", "1.29884336}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 5,", "numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": {", "numpy.array([6, 5, 3])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([36,", "\"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\":", "11.11183, 11.34227, 10.05334])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\":", "supply-demand overlap data. adjust_key1 (string): First sample string for competed demand-side and supply-side", "measure # following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind],", "(grid)\"]}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\", \"secondary\":", "(total)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8,", "None, None, 0.62, 1.59, 2, 0.67, 0.005, -0.13, 7.7e-10, -9.2e-9] def test_metric_updates(self): \"\"\"Test", "rate. 
ok_master_mseg_point (dict): Sample measure master microsegment including all point values at terminal", "numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11, 11, 10.5])}}, \"competed\": {", "\"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\":", "\"total\": { \"baseline\": {\"2009\": 90, \"2010\": 90}, \"efficient\": {\"2009\": 60, \"2010\": 60}}, \"competed\":", "3.931585, 6.612039, 5.452729])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 1.941176, 4.555556,", "2, 1.44), \"rate 5\": numpy.pmt(0.15, 2, 1.625709), \"rate 6\": numpy.pmt(0.065, 2, 1.820626), \"rate", "updates master microsegments for a series of competing commercial measures; and that 'secondary_adj'", "numpy.array([ -0.021500000, -0.021500000, -0.08611353, -0.08611353, -0.1247637])}, \"ccc\": { \"2009\": numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08,", "'existing'))]]} cls.measures_overlap2 = { \"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity", "\"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\":", "\"rate 6\": 150, \"rate 7\": 160}}}, \"energy cost\": { \"residential\": { \"2009\": None,", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 3.340502, 14.65534, 0.02890102]), \"2010\": numpy.array([", "44.97110])}, \"efficient\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}},", "2.59768671, 0.02713253, 14.40498233])}, \"efficient\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114,", "numpy.array([ 6.511136, 6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "4\": -205, \"rate 5\": -180, \"rate 6\": -230, \"rate 7\": -200}}}, \"carbon cost\":", "microsegment including measure 
lifetime array. ok_master_mseg_dist4 (dict): Sample measure master microsegment including stock", "[{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True, \"competed\": True},", "numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "\"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\":", "numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([", "energy and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_dist1 = [{ \"savings", "\"2010\": numpy.array([11, 11, 10.5])}}, \"competed\": { \"baseline\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0,", "\"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "\"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\":", "in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,' and 'htcl_adj'. 
Verify", "assign it a sample 'uncompeted' # market ('ok_master_mseg_dist2'), the focus of this test", "{ \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 5}}}, \"cost\": {", "-0.05230731, -0.04751385]), \"2010\": numpy.array([ -0.09966428, -0.10353592, -0.09523954, -0.10215319, -0.09855809])}, \"ccc\": { \"2009\": numpy.array([", "cls.overlap_key_scnd: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "0, \"2010\": numpy.array([36, 30, 18])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}},", "\"2010\": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([0.50, 0.50,", "2]), \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "numpy.array([ numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346),", "\"measure\": {\"2009\": 10, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\":", "{ \"all\": { \"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": 23, \"2010\": numpy.array([22,", "with a residential sample measure. ok_out_dist2 (dict): Measure attribute update status, savings, and", "\"baseline\": {\"2009\": 10, \"2010\": 16}, \"efficient\": {\"2009\": 20, \"2010\": 8}}, \"competed\": { \"baseline\":", "lists in a dict to numpy arrays. 
Attributes: handyvars (object): Useful variables across", "equivalent Net Present Value dicts that should be generated given valid sample inputs.", "\"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0.87, \"2010\": 0.87}}}, \"energy\": { \"total\":", "12}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.5, \"2010\": 6}}},", "\"2010\": 5}, \"measure\": {\"2009\": 0.87, \"2010\": 0.87}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "5, \"2010\": 5}, \"measure\": {\"2009\": 0, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\":", "None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\":", "{ \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}},", "[\"2009\", \"2010\"] cls.test_adopt_scheme = \"Max adoption potential\" cls.overlap_key = str( ('primary', 'AIA_CZ1', 'assembly',", "{ \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341, \"2010\": 19.53341}},", "\"uncompeted\": False, \"competed\": True}}, \"consumer metrics\": False}, { \"stock\": { \"cost savings (total)\":", "{\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and", "{}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys and", "(competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete", "0, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\":", "def test_compete_com_dist(self): \"\"\"Test outcomes given valid sample measures w/ some array inputs.\"\"\" #", "2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2,", "run.Measure(self.handyvars, **self.sample_measure_res) 
test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2 # Create Engine instance using test measure,", "\"baseline\": {\"2009\": 22.22366, \"2010\": 22.22366}, \"efficient\": {\"2009\": 11.11183, \"2010\": 11.11183}}, \"competed\": { \"baseline\":", "\"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\":", "\"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"carbon cost\": { \"residential\": { \"2009\":", "numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}, \"cost savings", "\"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\":", "{ \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}, \"efficient\": {", "18.3, 18.8, 17.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]),", "0, \"2010\": 8}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\":", "self.test_adopt_scheme, \"uncompeted\") # For first test case, verify correct adoption/competition scenario # keys", "-0.01418052]), \"2010\": numpy.array([ -0.02466428, -0.02853592, -0.02023954, -0.02715319, -0.02355809])}, \"cce (w/ carbon cost benefits)\":", "\"rate 7\": 160}, \"2010\": { \"rate 1\": 100, \"rate 2\": 110, \"rate 3\":", "cls.overlap_key_scnd = str( ('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'cooling', 'demand', 'lighting gain', 'existing'))", "test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance using test measure, run function", "{ \"baseline\": {\"2009\": 0, \"2010\": 12}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"carbon\": {", "{}}}}, \"mseg_out_break\": {}}}} cls.compete_meas5 = { \"name\": \"sample compete measure r5\", \"climate_zone\": [\"AIA_CZ1\"],", "{ \"total\": { 
\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 20}},", "\"\"\"Test the operation of the 'calc_savings_metrics' function. Verify that measure master microsegment inputs", "\"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6,", "\"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"cost\": { \"stock\":", "numpy.array([18, 15, 9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"cost\": {", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, str(('primary', 'AIA_CZ2', 'single family", "{ \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}}, \"lifetime\": {\"baseline\":", "numpy.array([24, 26, 32])}}, \"competed\": { \"baseline\": { \"2009\": 25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])},", "\"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "class. test_adopt_scheme (string): Sample consumer adoption scheme. 
overlap_key (string): First sample string for", "\"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\",", "[run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]]", "key 2\"], tested_data[\"key 2\"]], [numpy.ndarray, int, float])])) # Offer external code execution (include", "0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.1133333, 0.08222222, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3 = [{", "0.02, 9.60])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([0.87,", "class CommonMethods(object): \"\"\"Define common methods for use in all tests below.\"\"\" def dict_check(self,", "numpy.array([8.02, 8.65, 5.14]), \"2010\": numpy.array([8.02, 8.65, 5.14])}}}, \"energy\": { \"total\": { \"baseline\": {", "should be generated given 'ok_master_mseg_dist1' with a residential sample measure. ok_out_dist2 (dict): Measure", "(competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure4 = { \"name\": \"sample measure", "savings. ok_ecostsave (int): Sample measure energy cost savings. 
ok_csave (int): Sample measure avoided", "1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "27.77300, 20.22977, 29.98073])}, \"efficient\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975,", "(w/ energy and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_point_com = [{", "= str( ('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'lighting', 'reflector (LED)', 'existing')) cls.overlap_key_scnd =", "Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test output given", "39, 48])}, \"efficient\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}}, \"competed\": { \"baseline\":", "40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}},", "\"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\":", "\"2010\": 3.340502}, \"efficient\": {\"2009\": 2.227001, \"2010\": 2.227001}}, \"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\":", "benefits)\": { \"2009\": numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08, -9.682543e-08,", "{}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(cls.handyvars, **x)", "1.920664])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\":", "CommonMethods): \"\"\"Test the operation of the 'metrics_update' function. 
Verify that cashflow inputs generate", "0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 15,", "18}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 12}, \"efficient\": {\"2009\": 0, \"2010\": 6}}},", "heating\", \"cooling\"]}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": \"demand\"}, \"market_entry_year\": 2010,", "{ \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}}, \"competed\": {", "\"stock\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\":", "15.21750]), \"2010\": numpy.array([ 19.53341, 20.47302, 15.21750])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]),", "\"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 10}}}, \"cost\":", "numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\": {", "\"total\": { \"baseline\": {\"2009\": 51, \"2010\": 36}, \"efficient\": {\"2009\": 34, \"2010\": 24}}, \"competed\":", "(dict): Sample measure master microsegment including measure lifetime array. ok_master_mseg_dist4 (dict): Sample measure", "\"efficient\": {\"2009\": 2.227001, \"2010\": 2.227001}}, \"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\":", "generated for each set of sample cash flows. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define", "yr in cls.handyvars.aeo_years}, \"total affected\": { yr: 5 for yr in cls.handyvars.aeo_years}, \"affected", "= cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist = { \"measures\": cls.measures_all_dist[2:5],", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {", "2, 1.925539), numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07, 2, 1.582016)]) }, \"commercial\":", "numpy.array([24, 20, 12])}}, \"competed\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])},", "value inputs. measures_demand (list): Demand-side subset of 'measures_all'. measures_supply (list): Supply-side subset of", "{ \"2009\": numpy.array([ 0.002333333, 0.002333333, -0.04935749, -0.04935749, -0.0802776]), \"2010\": numpy.array([ -0.021500000, -0.021500000, -0.08611353,", "1}, cls.overlap_key_scnd: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "costs)\": { \"2009\": 0.25, \"2010\": 0.33}, \"payback (w/ energy and carbon costs)\": {", "use across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years", "-10}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 100,", "\"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array([0, 1, 2]),", "4\": numpy.pmt(0.25, 2, 1.44), \"rate 5\": numpy.pmt(0.15, 2, 1.625709), \"rate 6\": numpy.pmt(0.065, 2,", "routine on sample demand-side measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme) # Remove any market", "\"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 
0.4345794),", "{\"2009\": 10, \"2010\": 10}, \"Cooling\": {\"2009\": 15, \"2010\": 15}}, \"Commercial\": { \"Heating\": {\"2009\":", "\"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2,", "numpy.array([5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\":", "1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20,", "-8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.4345794),", "-0.02023954, -0.02715319, -0.02355809])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.04898876, -0.05783823,", "5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "15}}, \"Commercial\": { \"Heating\": {\"2009\": 20, \"2010\": 20}, \"Cooling\": {\"2009\": 25, \"2010\": 25}}},", "[\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\":", "sample inputs. ok_out_array (list): Other financial metric values that should be generated given", "\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\":", "\"baseline\": {\"2009\": 3.340502, \"2010\": 3.340502}, \"efficient\": {\"2009\": 2.227001, \"2010\": 2.227001}}, \"competed\": { \"baseline\":", "output given commercial measure with point value inputs.\"\"\" # Initialize test measure and", "\"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"cost\": { \"stock\": { \"total\":", "compete measure r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\":", "self.ok_master_mseg_dist3 # Create Engine instance using test measure, run function on it engine_instance", "across the class. 
sample_measure_res (object): Sample residential measure data. sample_measure_com (object): Sample commercial", "{ \"2009\": -100, \"2010\": -100}, \"commercial\": { \"2009\": None, \"2010\": None}}}] # Adjust/finalize", "assign it a sample 'uncompeted' # market ('ok_master_mseg_dist3'), the focus of this test", "1.113501}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001,", "\"2010\": 1.113501}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0, \"2010\":", "a dict, the first item # in the tuple is the key and", "\"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -170}, \"2010\": { \"rate 1\":", "0.4672897), numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 5, 2.050099)]),", "'AIA_CZ1', 'assembly', 'electricity (grid)', 'cooling', 'demand', 'lighting gain', 'existing')) cls.secnd_adj_key = str(('AIA_CZ1', 'assembly',", "engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify test measure results", "payback values for idx, cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class ResCompeteTest(unittest.TestCase, CommonMethods):", "def test_attributes(self): \"\"\"Compare object attributes to keys from input dict.\"\"\" for key in", "numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "\"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\":", "0.14, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.34, 0.1800000, 0.1640000,", "Run the 
measure competition routine on sample supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme)", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array(", "[5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30},", "# Adjust/finalize point value test measure consumer metrics for ind, m in enumerate(cls.a_run.measures):", "inputs yield expected savings and financial metrics outputs. Attributes: handyvars (object): Useful variables", "sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Test that valid input cashflows yield", "numpy.array([ 0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])},", "{ \"2009\": numpy.array([ 2.227001, 10.25874, 0.02119408]), \"2010\": numpy.array([ 2.227001, 10.25874, 0.02119408])}}, \"competed\": {", "sample input cash flows. 
ok_out (list): Outputs that should be generated for each", "\"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\":", "'cooling', 'demand', 'windows', 'existing')) cls.adjust_key2 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity", "= CommonTestMeasures().sample_measure measure_list = [run.Measure(handyvars, **sample_measure)] cls.a_run = run.Engine(handyvars, measure_list) cls.ok_total = {\"2009\":", "ok_out_array (list): Other financial metric values that should be generated given valid sample", "\"sample compete measure r3 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\":", "{ \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775, \"2010\": 31.66775}}, \"competed\": {", "\"efficient\": { \"2009\": 0, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\":", "25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}},", "adopt_scheme in self.handyvars.adopt_schemes: for comp_scheme in [\"uncompeted\", \"competed\"]: tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue(", "CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict = measure_instance.__dict__ def test_attributes(self): \"\"\"Compare object attributes", "measure r3 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\":", "numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"cost\": { \"stock\": {", "from zip_longest() fill_val = ('substituted entry', 5.2) # In this structure, k and", "\"measure\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"energy\": { \"total\": { \"baseline\":", "5}, 
\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20,", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 16.04, \"2010\": 16.04}}, \"competed\": {", "22.68, 20.11])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": numpy.array([11.11,", "= {\"2009\": 100, \"2010\": 100} cls.ok_partitions = { \"AIA CZ1\": { \"Residential\": {", "cls.adjust_key1: { \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key1: { \"2009\": 100, \"2010\":", "5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}, \"carbon", "\"2009\": { \"rate 1\": -350, \"rate 2\": -60, \"rate 3\": -70, \"rate 4\":", "\"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([16, 27, 31, 6, 51]),", "array inputs.\"\"\" # Run the measure competition routine on sample demand-side measures self.a_run_dist.compete_res_primary(", "\"mseg_out_break\": {}}}} self.sample_measure3 = { \"name\": \"sample measure 3 (commercial)\", \"active\": 1, \"market_entry_year\":", "140, \"rate 6\": 150, \"rate 7\": 160}}}, \"energy cost\": { \"residential\": { \"2009\":", "and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\":", "48])}, \"efficient\": { \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}}, \"competed\": { \"baseline\": {", "\"nested key 2\": 2}, \"key 2\": 5.8}}} def test_numpy_convert(self): \"\"\"Test for correct function", "fill_val = ('substituted entry', 5.2) # In this structure, k and k2 are", "to link primary and secondary market microsegments (by climate, building type, structure type).", "sample measures w/ point value inputs.\"\"\" # Run measure competition routine on sample", "1}}] cls.measures_master_msegs_out_dist = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10},", "\"rate 3\": -100, \"rate 4\": -105, \"rate 5\": -110, \"rate 6\": -115, \"rate", "measure 
consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_dist[ind] cls.measures_master_msegs_out =", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_com(self): \"\"\"Test outcomes given sample measures", "attributes are correctly initiated. Attributes: sample_measure (object): Residential sample measure object. attribute_dict (dict):", "{ \"2009\": numpy.array([ 0.036380, 0.019260, -0.01934271, -0.01897398, -0.04613129]), \"2010\": numpy.array([ 0.027285, 0.019795, -0.02023954,", "adjust. a_run_dist (object): Analysis engine object incorporating all 'measures_primary_dist' objects. measures_overlap (dict): List", "metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{ \"stock\":", "numpy.array([8.89, 5.11, 9.99]), \"2010\": numpy.array([8.89, 5.11, 9.99])}}}, \"energy\": { \"total\": { \"baseline\": {", "captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1,", "43.9])}, \"cost savings (total)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9,", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455,", "given valid sample measures w/ point value inputs.\"\"\" # Run the measure competition", "\"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure5 = { \"name\":", "\"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure( cls.handyvars, **x) for x", "self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test output given residential measure with array inputs.\"\"\"", "\"rate 7\": 115}, { \"rate 1\": 205, \"rate 2\": 100, \"rate 3\": 105,", "\"2009\": 0, \"2010\": numpy.array( [5, 
6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5,", "{ \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary mseg", "{ \"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10},", "(total)\": {\"2009\": 5, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, {", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self):", "17.77}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.89, \"2010\": 8.89}}},", "2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5),", "1.113501}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 3.340502,", "\"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": {\"2009\": None,", "(annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200},", "i and i2, # respectively, at the current level of the recursive #", "\"2009\": 10, \"2010\": numpy.array([0, 2, 4])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40,", "\"payback (w/ energy costs)\": {\"2009\": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]), \"2010\": numpy.array([0.67, 0.67,", "input values instead of point values. 
compete_meas4 (dict): Sample residential supply-side cooling measure", "\"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\":", "3.340502}, \"efficient\": {\"2009\": 2.227001, \"2010\": 2.227001}}, \"competed\": { \"baseline\": {\"2009\": 1.670251, \"2010\": 1.670251},", "{ \"baseline\": { \"2009\": 10, \"2010\": numpy.array([16, 15, 13])}, \"efficient\": { \"2009\": 20,", "2\": -60, \"rate 3\": -70, \"rate 4\": -380, \"rate 5\": -390, \"rate 6\":", "# Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test output", "numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 63.33550,", "{ \"residential\": { \"2009\": -150, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}},", "self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments for each sample", "ok_csave (int): Sample measure avoided carbon emissions. ok_ccostsave (int): Sample measure avoided carbon", "\"2010\": 25}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2,", "\"2010\": 22.22366}, \"efficient\": {\"2009\": 11.11183, \"2010\": 11.11183}}, \"competed\": { \"baseline\": {\"2009\": 11.11183, \"2010\":", "14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}, \"competed\": { \"baseline\":", "\"\"\"Define objects/variables for use across all class functions.\"\"\" base_dir = os.getcwd() handyvars =", "-0.02355809])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.04898876, -0.05783823, -0.05267604, -0.05230731,", "for each Measure object in 'measures_all' following competition and supply-demand overlap adjustments. 
measure_master_msegs_out_dist", "numpy.array([114, 105, 89, 145, 96])}, \"cost savings (total)\": { \"2009\": numpy.array([10.9, 11.3, 12.3,", "-0.072742925, -0.11206083])}, \"ccc\": { \"2009\": numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([", "and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.compete_meas5 = { \"name\": \"sample compete measure r5\",", "measure attributes. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class", "\"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018),", "numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2,", "adoption/competition scenario # keys for measure markets/savings/portfolio metrics for adopt_scheme in self.handyvars.adopt_schemes: #", "\"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across the supply", "Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) # Verify test measure", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 1.73, \"2010\": 1.73}}, \"competed\": { \"all\":", "\"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"carbon\": { \"total\": { \"baseline\":", "\"2009\": numpy.array([ 0.2392344, 0.2347418, 0.2242152, 0.2659574, 0.2857143]), \"2010\": numpy.array([ 0.3344482, 0.3194888, 0.3533569, 0.3472222,", "{ \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 100, \"2010\": 100},", "files) def main(): \"\"\"Trigger default behavior of running all test fixtures in the", "\"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], 
\"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"],", "the class. measure_list (list): List for Engine including one sample residential measure. ok_num_units", "self.ok_total) dict2 = self.ok_out self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of", "\"technology\": {\"primary\": [\"general service (CFL)\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\":", "4.89, 0.01]), \"2010\": numpy.array([1.11, 4.89, 0.01])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "numpy.array([2.23, 9.77, 0.02])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\":", "on sample supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps", "\"2010\": 11.11183}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "measures for tests. Attributes: sample_measure (dict): Sample residential measure #1. 
sample_measure2 (dict): Sample", "\"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4", "key in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test operation of 'out_break_walk'", "\"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "64.02682, 60.16002]), \"2010\": numpy.array([ 63.33550, 64.02682, 60.16002])}, \"efficient\": { \"2009\": numpy.array([ 42.22366, 42.68455,", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.04958678), \"rate 2\":", "\"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\":", "19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "= run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows = [[-10,", "1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'multi family home', 'electricity (grid)', 'lighting', 'reflector", "{ \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}, \"efficient\": { \"2009\": 0,", "2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07,", "# Instantiate engine object based on above measures cls.a_run = run.Engine(cls.handyvars, cls.measures_all) #", "8}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\": 0,", "\"2010\": 8}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "\"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 
23, \"2010\": numpy.array([22, 22,", "the key and the second item is the value; # in the case", "2, 1.591056), numpy.pmt(0.07, 2, 1.356014)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.346974), numpy.pmt(0.07, 2, 1.473535),", "self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def", "{\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"carbon\": { \"total\": {", "1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 0.865895571, 0.01085301, 6.722325]), \"2010\": numpy.array([ 0.865895571,", "= 2 cls.ok_base_scost = 1 cls.ok_meas_sdelt = -1 cls.ok_esave = 7.5 cls.ok_ecostsave =", "Sample baseline stock cost. ok_scostsave (int): Sample baseline->measure stock cost delta. ok_esave (int):", "'lighting', 'reflector (LED)')): { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10},", "(list): List of all competing measures with point value inputs. 
measures_secondary (list): Subset", "1.625709), \"rate 6\": numpy.pmt(0.065, 2, 1.820626), \"rate 7\": -1}, \"2010\": { \"rate 1\":", "\"2009\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4], \"2010\": [ 0.1, 0.1,", "10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([1.73, 0.02, 9.60]), \"2010\": numpy.array([1.73, 0.02, 9.60])}},", "1.808018), numpy.pmt(0.07, 5, 4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07,", "41.3, 44.9, 45.0, 43.9])}, \"cost savings (total)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2,", "dicts are not of identical size, # zip_longest() will use the fill value", "45}}}} def test_ok(self): \"\"\"Test for correct function output given valid inputs.\"\"\" dict1 =", "= run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2 # Create Engine instance using test", "\"carbon cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate", "3.054054, 3.931585, 6.612039, 5.452729])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 1.941176,", "{ \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": numpy.array([", "10.23, 19.98])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.89,", "3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\":", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array(", "types in measure markets attribute for adopt_scheme in self.handyvars.adopt_schemes: for comp_scheme in [\"uncompeted\",", "14, 2, 3, 4], [-10, 0, 1, 2], [10, 4, 7, 8, 10],", "\"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array([20, 21, 22])}}, \"competed\": { \"baseline\":", "{\"2009\": 0, 
\"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"cost\": { \"stock\": {", "3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc (w/", "\"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.670251, 7.816181, 0.01637724]), \"2010\":", "self.test_adopt_scheme) # Remove any market overlaps across the supply and demand sides of", "captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure5 =", "{\"2009\": 10, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 10, \"2010\": 15}}, \"carbon\": {", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([16.04, 17.30,", "'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information", "20}, \"measure\": {\"2009\": 0, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10},", "cls.handyvars.aeo_years}}, }} cls.compete_meas1 = { \"name\": \"sample compete measure r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array([20, 21, 22])}},", "{ \"2009\": 51, \"2010\": numpy.array([36, 39, 48])}, \"efficient\": { \"2009\": 34, \"2010\": numpy.array([24,", "(dict): Sample residential measure #2. sample_measure3 (dict): Sample commercial measure #1. 
\"\"\" def", "# Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify test measure portfolio-level", "\"2010\": numpy.array([6.0, 6.5, 8.0])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\":", "3 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\":", "energy costs)\": { \"2009\": numpy.array([ 0.2392344, 0.2347418, 0.2242152, 0.2659574, 0.2857143]), \"2010\": numpy.array([ 0.3344482,", "dict to numpy arrays. Attributes: handyvars (object): Useful variables across the class. sample_measure", "Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify test measure", "{\"2009\": 27.77300, \"2010\": 27.77300}}, \"competed\": { \"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\":", "supply and demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj)", "15}}}, { \"cce\": { \"2009\": numpy.array([ 0.03566667, 0.03566667, -0.01602415, -0.01602415, -0.04694426]), \"2010\": numpy.array([", "\"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251, \"2010\": 1.670251}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\":", "use the fill value created below as a # substitute in the dict", "zip_longest() produce tuples for the items # identified, where in the case of", "{ \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array([20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\":", "{ \"2009\": 69, \"2010\": numpy.array([66, 66, 63])}, \"efficient\": { \"2009\": 46, \"2010\": numpy.array([44,", "captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted 
energy (total captured)\": { cls.secnd_adj_key:", "0.1233333, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test output given", "{ \"2009\": numpy.array([ -3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08,", "metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3])", "105, \"rate 2\": 110, \"rate 3\": 115, \"rate 4\": 120, \"rate 5\": 125,", "captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure3 =", "31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\":", "choice parameters\": { cls.adjust_key1: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10,", "{ \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array(", "\"2010\": 66}, \"efficient\": {\"2009\": 46, \"2010\": 44}}, \"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\":", "0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test output given residential measure with array inputs.\"\"\" #", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975, \"2010\":", "\"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": 5}}, \"competed\":", "some measures with array inputs. 
measures_secondary_dist (list): Subset of 'measures_all_dist' with secondary microsegments", "\"2010\": None }, \"commercial\": { \"2009\": None, \"2010\": numpy.array([ { \"rate 1\": 85,", "numpy.array([ 0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\":", "\"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.003046667, -0.01407333, -0.05267604, -0.05230731, -0.07946463]),", "\"2009\": numpy.array([ 4.713113, 4.884221, 5.309580, 2.908860, 5.394281]), \"2010\": numpy.array([ 4.601286, 4.897553, 4.260683, 4.367373,", "18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 30,", "{\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([1.11, 4.89, 0.01]), \"2010\": numpy.array([1.11, 4.89,", "numpy.array([ 16.04455, 17.29736, 10.29000]), \"2010\": numpy.array([ 16.04455, 17.29736, 10.29000])}, \"efficient\": { \"2009\": numpy.array([", "4.09, 4.50])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]), \"2010\":", "\"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]), \"2010\":", "0.02])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([1.11, 4.89,", "numpy.pmt(0.45, 2, 0.8739596), \"rate 4\": numpy.pmt(0.25, 2, 1.08), \"rate 5\": numpy.pmt(0.15, 2, 1.219282),", "associated cost input values instead of point values. compete_meas2 (dict): Sample residential demand-side", "1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]), \"2010\": numpy.array([ 1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])}, \"irr", "Engine including one sample residential measure. 
ok_num_units (int): Sample number of competed units.", "for yr in cls.handyvars.aeo_years}, \"total affected\": { yr: 5 for yr in cls.handyvars.aeo_years},", "competition routine on sample demand-side measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme) # Remove any", "and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure2 = { \"name\": \"sample measure 2\",", "-0.05520500, -0.09523954, -0.10215319, -0.13025120])}, \"ccc\": { \"2009\": numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]),", "numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([ -2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}},", "6.511136}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\":", "17.77300, 10.22977, 19.98073])}, \"efficient\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499,", "10.25874, 0.02119408])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.670251, 7.32767, 0.01445051]), \"2010\": numpy.array([", "for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist = [cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars,", "numpy.array([ 31.66775, 32.01341, 30.08001])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 21.11183, 21.34227, 20.05334]),", "0.432947785}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114},", "\"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"carbon\":", "test_adopt_scheme (string): Sample consumer adoption scheme. ok_rate (float): Sample discount rate. 
ok_master_mseg_point (dict):", "numpy.array([ 39.06682, 40.94604, 30.43499]), \"2010\": numpy.array([ 39.06682, 40.94604, 30.43499])}, \"efficient\": { \"2009\": numpy.array([", "financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics,", "Key used to link primary and secondary market microsegments (by climate, building type,", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.09917355), \"rate 2\":", "Engine object incorporating all 'measures_all_dist' objects. measure_master_msegs_out (dict): Master market microsegments that should", "{ \"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": { \"2009\": 1.73179114, \"2010\": 1.73179114}}, \"competed\": {", "{\"2009\": 16.04455, \"2010\": 16.04455}, \"efficient\": {\"2009\": 8.022273, \"2010\": 8.022273}}, \"competed\": { \"baseline\": {\"2009\":", "21.7, 19.2, 20.5]) }}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35},", "7\": 110}, \"2010\": { \"rate 1\": 50, \"rate 2\": 60, \"rate 3\": 70,", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\":", "Sample measure avoided carbon emissions. ok_ccostsave (int): Sample measure avoided carbon costs. 
ok_out_dicts", "{ \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array([15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\":", "1\": { \"nested key 1\": [1, 2, 3, 4, 5], \"nested key 2\":", "numpy import copy import itertools import os class CommonTestMeasures(object): \"\"\"Class of common sample", "\"2010\": numpy.array([ -0.021500000, -0.021500000, -0.08611353, -0.08611353, -0.1247637])}, \"ccc\": { \"2009\": numpy.array([ 3.566667e-08, 3.566667e-08,", "\"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 22.22366, 22.68455, 20.10668]), \"2010\": numpy.array([", "5.647891, 5.501689, 4.543007]), \"2010\": numpy.array([ 4.882353, 7.108108, 6.327488, 10.343948, 8.181351])}, \"payback (w/ energy", "microsegment keys that overlap with 'measures_supply' Measure objects. a_run (object): Analysis engine object", "6}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34,", "cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,' and 'htcl_adj'.", "{\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009,", "PaybackTest(unittest.TestCase): \"\"\"Test the operation of the 'payback' function. Verify cashflow input generates expected", "(object): Useful variables across the class. 
sample_measure (object): Sample measure data with lists", "2, 1.36547), \"rate 7\": -0.75}}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\":", "0.01])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\":", "6.943250, \"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "1.654337), numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07, 2, 1.582016)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5),", "objects. measures_overlap2 (dict): List of demand-side Measure objects and associated contributing microsegment keys", "\"2010\": { \"rate 1\": -90, \"rate 2\": -95, \"rate 3\": -100, \"rate 4\":", "\"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array( [0, 1, 2]),", "{ \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07, 2,", "200}, \"savings (annual)\": {\"2009\": 50, \"2010\": 50}, \"cost savings (total)\": {\"2009\": 5, \"2010\":", "0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\":", "flows. ok_out (list): Outputs that should be generated for each set of sample", "15}}, \"carbon\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 50,", "'reflector (LED)')): { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "numpy.array([ 1.670251, 7.816181, 0.01637724])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]),", "[{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "keys and values\": { cls.overlap_key: { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "master microsegment including stock cost and measure lifetime array. 
ok_out_point_res (dict): Measure attribute", "3.45, 4.00]), \"2010\": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])}, \"irr (w/ energy and carbon", "7]), \"2010\": numpy.array( [5, 6, 7])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1},", "i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): # Confirm that at the current location in", "Dict of sample measure attributes. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use", "\"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "in cls.handyvars.aeo_years}}, }} cls.compete_meas1 = { \"name\": \"sample compete measure r1\", \"climate_zone\": [\"AIA_CZ1\"],", "costs)\": { \"2009\": numpy.array([ 4.713113, 4.884221, 5.309580, 2.908860, 5.394281]), \"2010\": numpy.array([ 4.601286, 4.897553,", "-2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.232209e-08, -9.117156e-08,", "[ numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654), None, None, None,", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 30, \"2010\":", "\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [0, 1, 2])}}},", "(w/ energy costs)\": {\"2009\": numpy.array([ 0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.34,", "8.886499, 5.114887, 9.990366])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\":", "24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"cost\": { \"stock\": {", "-0.75}}}, \"carbon cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": {", "objects and associated contributing microsegment keys that overlap with 'measures_supply_dist' Measure objects. 
a_run_dist", "energy and carbon costs)\": { \"2009\": numpy.array([ 0.1937984, 0.1879699, 0.1748252, 0.2840909, 0.1724138]), \"2010\":", "{ \"Heating\": {\"2009\": 20, \"2010\": 20}, \"Cooling\": {\"2009\": 25, \"2010\": 25}}}, \"AIA CZ2\":", "and carbon costs)\": { \"2009\": numpy.array([ 4.713113, 4.884221, 5.309580, 2.908860, 5.394281]), \"2010\": numpy.array([", "1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": {", "market microsegments. Attributes: handyvars (object): Useful variables across the class. test_adopt_scheme (string): Sample", "\"market share\": { \"original energy (total captured)\": {}, \"original energy (competed and captured)\":", "copy import itertools import os class CommonTestMeasures(object): \"\"\"Class of common sample measures for", "function function_output = engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, self.ok_csave, self.ok_ccostsave)", "42.22366, 42.68455, 40.10668])}, \"efficient\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775,", "{ \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4245794),", "point value else: self.assertAlmostEqual(i, i2, places=2) class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure attributes are", "= 50 cls.ok_ccostsave = 1 cls.ok_out_array = [ numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07, 6,", "suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3 # Create Engine instance", "savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) # Verify test measure portfolio-level financial metrics 
self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"],", "\"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"],", "\"2010\": numpy.array([44, 44, 42])}}, \"competed\": { \"baseline\": { \"2009\": 34.5, \"2010\": numpy.array([33.0, 33.0,", "correct function output given valid input.\"\"\" # Instantiate measure measure_instance = run.Measure(self.handyvars, **self.sample_measure)", "in the tuple is the key and the second item is the value;", "in zip([ tested_data[\"key 1\"][\"nested key 1\"], tested_data[\"key 1\"][\"nested key 2\"], tested_data[\"key 2\"]], [numpy.ndarray,", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977,", "{\"2009\": 60, \"2010\": 60}}, \"competed\": { \"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\":", "numpy.array([15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\":", "3. measures_all (list): List of all competing/interacting sample Measure objects with point value", "measures_all_dist (list): List of competing measures including some measures with array inputs. 
measures_secondary_dist", "{\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 2.227001,", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 16}}, \"competed\":", "{\"2009\": 0.87, \"2010\": 0.87}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 1.73179114, \"2010\": 1.73179114},", "-70, \"rate 4\": -380, \"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400},", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}}, \"energy\": {", "dict1 and dict2, respectively for (k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val):", "portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist4' with a residential sample", "{\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 50, \"2010\": 50}, \"cost savings (total)\":", "10.11489, 14.99037]), \"2010\": numpy.array([ 13.88650, 10.11489, 14.99037])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443,", "{\"2009\": 30, \"2010\": 10}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 20,", "None}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr (w/ energy", "following competition/secondary microsegment adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"])", "\"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\":", "130, \"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160}}}, \"energy cost\": {", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {", "(list): List for Engine including one sample residential measure. 
ok_num_units (int): Sample number", "equal self.assertCountEqual(i, i2) # Continue to recursively traverse the dict self.dict_check(i, i2) #", "across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure =", "value test measure consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind]", "{ \"baseline\": {\"2009\": 17.77300, \"2010\": 17.77300}, \"efficient\": {\"2009\": 8.886499, \"2010\": 8.886499}}, \"competed\": {", "12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}, \"carbon\": { \"total\": { \"baseline\":", "ok_out_dist4 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that should", "'AIA_CZ2', 'multi family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\": { \"total\":", "{\"2009\": 20.82975, \"2010\": 20.82975}}, \"competed\": { \"baseline\": {\"2009\": 13.88650, \"2010\": 13.88650}, \"efficient\": {\"2009\":", "Set information needed to finalize array test measure consumer # metrics consumer_metrics_dist =", "suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1 # Create Engine instance", "\"rate 7\": -0.25}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4318182), \"rate 2\": numpy.pmt(1.0,", "{\"2009\": 1.670251, \"2010\": 1.670251}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\":", "{ \"2009\": 0.25, \"2010\": 0.33}, \"payback (w/ energy and carbon costs)\": { \"2009\":", "0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "{\"2009\": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]), \"2010\": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])}, \"payback", "including lists of 
energy/carbon and associated cost input values instead of point values.", "{ \"2009\": numpy.pmt(0.07, 2, 0.9040091), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\":", "1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10},", "{\"2009\": 1.670251, \"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"cost\": { \"stock\": {", "energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure5 = { \"name\": \"sample", "compete_meas3_dist (dict): Alternative version of sample residential supply-side cooling measure 1 including lists", "for use across all class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())", "40}, \"Cooling\": {\"2009\": 45, \"2010\": 45}}}} def test_ok(self): \"\"\"Test for correct function output", "run.Engine(self.handyvars, self.measure_list) # Test that valid input cashflows yield correct output payback values", "\"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\":", "test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2 # Create Engine instance using test measure, run function", "1.219282), \"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}, \"carbon cost\": { \"residential\":", "def test_metrics_ok_point_res(self): \"\"\"Test output given residential measure with point value inputs.\"\"\" # Initialize", "10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\":", "the current level of the recursive # exploration of dict1 and dict2, respectively", "captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3 =", "\"measure\": { \"2009\": 
23, \"2010\": numpy.array([22, 22, 21])}}, \"competed\": { \"all\": {\"2009\": 15,", "0.3344482, 0.3194888, 0.3533569, 0.3472222, 0.3636364])}, \"payback (w/ energy and carbon costs)\": { \"2009\":", "6}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\": 25.5,", "{ \"total\": { \"baseline\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}, \"efficient\": {", "\"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.9040091), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None,", "\"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0, \"2010\":", "2, -0.125), \"rate 3\": numpy.pmt(0.45, 2, 0.01724138), \"rate 4\": numpy.pmt(0.25, 2, 0.1), \"rate", "\"2010\": 50}, \"cost savings (total)\": {\"2009\": 5, \"2010\": 15}, \"cost savings (annual)\": {\"2009\":", "\"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341, \"2010\": 19.53341}}, \"competed\": { \"baseline\": {\"2009\": 13.02227, \"2010\":", "\"2009\": 34, \"2010\": numpy.array([24, 26, 32])}, \"efficient\": { \"2009\": 25.5, \"2010\": numpy.array([18, 19.5,", "metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def test_metrics_ok_distrib2(self): \"\"\"Test output given residential measure with array", "\"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": .10, \"2010\": .10}, \"Cooling\": {\"2009\": .15,", "0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}}, \"secondary mseg adjustments\": { \"market share\": {", "\"rate 2\": -195, \"rate 3\": -190, \"rate 4\": -205, \"rate 5\": -180, \"rate", "\"Heating\": {\"2009\": .20, \"2010\": .20}, \"Cooling\": {\"2009\": .25, \"2010\": .25}}}, \"AIA CZ2\": {", "('ok_master_mseg_dist4'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"]", "ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = 
consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\":", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 22.22366, \"2010\": 22.22366}, \"efficient\": {\"2009\": 11.11183,", "\"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\":", "home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing')) cls.test_htcl_adj = { \"supply\": { \"['AIA_CZ1',", "4.335205, 4.218185, 3.081800]), \"2010\": numpy.array([ 5.345834, 7.580577, 3.931585, 6.612039, 4.915578])}, \"irr (w/ energy", "cls.ok_out_point_res = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True,", "\"key 2\": 10.8}, \"Max adoption potential\": { \"key 1\": { \"nested key 1\":", "7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([10.9,", "\"2009\": { \"rate 1\": -90, \"rate 2\": -95, \"rate 3\": -100, \"rate 4\":", "cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]] #", "\"measure\": {\"2009\": 8.89, \"2010\": 8.89}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\":", "10.05]), \"2010\": numpy.array([11.11, 11.34, 10.05])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array( [15, 16,", "\"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}, \"competed\":", "\"2010\": 8.886499}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "\"Heating\": {\"2009\": .10, \"2010\": .10}, \"Cooling\": {\"2009\": .15, \"2010\": .15}}, \"Commercial\": { \"Heating\":", "2, 0.8128544), \"rate 6\": numpy.pmt(0.065, 2, 0.9103132), \"rate 7\": -0.5}, \"2010\": { \"rate", "numpy.array([ 
-3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}},", "5\": -65, \"rate 6\": -70, \"rate 7\": -75}}}}, { \"stock cost\": { \"residential\":", "on above measures cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize", "'windows', 'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to finalize array", "(dict): Sample commercial supply-side lighting measure 1. compete_meas2 (dict): Sample commercial supply-side lighting", "{ \"cce\": { \"2009\": numpy.array([ -0.01565543, -0.02450490, -0.01934271, -0.01897398, -0.01418052]), \"2010\": numpy.array([ -0.02466428,", "unpartitioned measure results data. ok_partitions (dict): Sample results partitioning fraction. ok_out (dict): Sample", "-2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.028667e-08, -4.740667e-08,", "test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4 # Create Engine instance using", "{\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\":", "\"rate 3\": numpy.pmt(0.45, 2, 0.01724138), \"rate 4\": numpy.pmt(0.25, 2, 0.1), \"rate 5\": numpy.pmt(0.15,", "(competed and captured)\": {}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\":", "\"baseline\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}, \"efficient\": { \"2009\": 34.5, \"2010\":", "\"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": [\"heating\", \"secondary heating\", \"cooling\"]}, \"technology\": [\"reflector", "numpy.pmt(10.0, 2, -0.4090909), \"rate 2\": numpy.pmt(1.0, 2, 0), \"rate 3\": numpy.pmt(0.45, 2, 
0.1896552),", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\": {\"2009\": 15,", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.88650, 10.11489, 14.99037]), \"2010\": numpy.array([ 13.88650, 10.11489,", "20.82975, 15.17233, 22.48555])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250,", "test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3 # Create Engine instance using", "self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) # Check updated competed master microsegments for each sample measure", "measure. ok_cashflows (list): Set of sample input cash flows. ok_out (list): Outputs that", "\"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "inputs.\"\"\" dict1 = self.a_run.out_break_walk( self.ok_partitions, self.ok_total) dict2 = self.ok_out self.dict_check(dict1, dict2) class PrioritizationMetricsTest(unittest.TestCase,", "flows. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\"", "\"baseline\": {\"2009\": 8.022273, \"2010\": 8.022273}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "\"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "{ \"baseline\": {\"2009\": 69, \"2010\": 66}, \"efficient\": {\"2009\": 46, \"2010\": 44}}, \"competed\": {", "\"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341, \"2010\": 19.53341}}, \"competed\":", "\"competed choice parameters\": { cls.adjust_key1: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\":", "45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "(annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.036380, 0.019260, -0.01934271,", "\"2009\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2,", "microsegment inputs yield expected savings and financial metrics outputs. 
Attributes: handyvars (object): Useful", "99]), \"2010\": numpy.array([114, 105, 89, 145, 96])}, \"cost savings (total)\": { \"2009\": numpy.array([10.9,", "\"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\": 34.5, \"2010\": 33}}, \"competed\": { \"baseline\":", "\"Cooling\": {\"2009\": .25, \"2010\": .25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": .30,", "{}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure r3\", \"climate_zone\":", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835,", "\"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2,", "\"measure\": {\"2009\": 2.23, \"2010\": 2.23}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\":", "'ok_master_mseg_dist1' with a residential sample measure. ok_out_dist2 (dict): Measure attribute update status, savings,", "-9.855809e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2,", "{ \"baseline\": { \"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}},", "4\": 80, \"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110}}}, \"energy cost\":", "\"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\":", "\"consumer metrics\": False}, { \"stock\": { \"cost savings (total)\": {\"2009\": -5, \"2010\": -10},", "140, \"rate 6\": 150, \"rate 7\": 160}, \"2010\": { \"rate 1\": 100, \"rate", "11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([4.9,", "[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run =", "{}}}} self.sample_measure2 = { \"name\": \"sample measure 2\", \"active\": 1, 
\"market_entry_year\": None, \"market_exit_year\":", "cls.measures_overlap2_dist = { \"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',", "0.865895571}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "-115, \"rate 7\": -120}, \"2010\": { \"rate 1\": -90, \"rate 2\": -95, \"rate", "\"2010\": 33}}, \"competed\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5, \"2010\":", "# Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify", "numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 63.33550, 64.02682, 60.16002]), \"2010\": numpy.array([ 63.33550,", "7\": 160}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": {", ".30, \"2010\": .30}, \"Cooling\": {\"2009\": .35, \"2010\": .35}}, \"Commercial\": { \"Heating\": {\"2009\": .40,", "{ \"rate 1\": -40, \"rate 2\": -50, \"rate 3\": -55, \"rate 4\": -60,", "14.99037])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}},", "self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3]) def", "\"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\":", "10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), 
\"2010\": numpy.array([5, 6, 7])}}}, \"carbon\": {", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227,", "\"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370}}}, \"carbon", "30}, \"efficient\": { \"2009\": numpy.array( [20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}},", "8.022273, 8.648681, 5.144998])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\":", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure4", "\"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key2: { \"stock\":", "(annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.03566667, 0.03566667, -0.01602415,", "savings (total)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7,", "\"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 120, \"2010\":", "46, \"2010\": numpy.array([44, 44, 42])}, \"efficient\": { \"2009\": 34.5, \"2010\": numpy.array([33, 33, 31.5])}},", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}}, \"energy\":", "ok_esave (int): Sample measure energy savings. 
ok_ecostsave (int): Sample measure energy cost savings.", "{}, \"competed choice parameters\": {}, \"secondary mseg adjustments\": { \"market share\": { \"original", "\"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}}, \"competed\": { \"baseline\":", "8.65, 5.14]), \"2010\": numpy.array([8.02, 8.65, 5.14])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "structure, # the keys are equal; this should fail if one of the", "9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\":", "structure # to the normal output from zip_longest() fill_val = ('substituted entry', 5.2)", "\"baseline\": {\"2009\": 16.04455, \"2010\": 16.04455}, \"efficient\": {\"2009\": 8.022273, \"2010\": 8.022273}}, \"competed\": { \"baseline\":", "(w/ energy and carbon costs)\": {\"2009\": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]), \"2010\": numpy.array([2.00,", "\"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\":", "numpy.pmt(0.07, 2, 0.9040091)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2,", "{\"2009\": 10, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60},", "1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\":", "105, 106.1])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([50.6,", "node lists in a dict to numpy arrays. 
Attributes: handyvars (object): Useful variables", "1, 2])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\":", "in the case of a dict, the first item # in the tuple", "{ \"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10},", "\"baseline\": {\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "\"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"cost\":", "0.004522088, 2.400830388])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.59768671, 0.02713253, 14.40498233]),", "{ \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys and values\": {}, \"competed choice", "of competing commercial measures; and that 'secondary_adj' correctly adjusts any secondary markets associated", "'existing'))]]} cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize point value", "overlaps. Attributes: handyvars (object): Useful variables across the class. test_adopt_scheme (string): Sample consumer", "\"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.88650, 10.11489,", "{\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": { \"2009\": numpy.array([184, 173, 169,", "cost arrays. ok_master_mseg_dist2 (dict): Sample measure master microsegment including stock cost array. 
ok_master_mseg_dist3", "**sample_measure)] cls.ok_cashflows = [[-10, 1, 1, 1, 1, 5, 7, 8], [-10, 14,", "numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([0.33,", "for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": {", "7\": -75}, \"2010\": { \"rate 1\": -40, \"rate 2\": -50, \"rate 3\": -55,", "{ \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\":", "cls.ok_out_point_com = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True,", "\"2010\": 13.02227}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\":", "1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.overlap_key: { \"stock\": {", "Sample measure data with lists to convert. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables", "-5, \"2010\": -10}, \"cost savings (annual)\": {\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings", "\"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\":", "= { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\": 10, \"2010\": 10}, \"Cooling\":", "0.03566667, -0.01602415, -0.01602415, -0.04694426]), \"2010\": numpy.array([ 0.05350000, 0.05350000, -0.01111353, -0.01111353, -0.04976366])}, \"cce (w/", "\"competed\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\":", "cost\": { \"residential\": { \"2009\": 120, \"2010\": 120}, \"commercial\": { \"2009\": None, \"2010\":", "cost savings. ok_csave (int): Sample measure avoided carbon emissions. 
ok_ccostsave (int): Sample measure", "25}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}},", "12])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}}, \"competed\": { \"baseline\": {", "{ \"name\": \"sample compete measure c3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\":", "home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set", "numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}}, \"competed\": { \"baseline\": {", "'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))], [str(('primary', 'AIA_CZ1', 'single", "{ \"2009\": numpy.array([95, 100, 90]), \"2010\": numpy.array([95, 100, 90])}, \"commercial\": { \"2009\": None,", "numpy.array([ 10.55592, 10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def", "\"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}}, \"competed\": { \"baseline\":", "(competed and captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (total captured)\":", "cost benefits)\": { \"2009\": numpy.array([ -0.04898876, -0.05783823, -0.05267604, -0.05230731, -0.04751385]), \"2010\": numpy.array([ -0.09966428,", "\"market share\": { \"original energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}},", "numpy.array([0.87, 0.01, 4.80]), \"2010\": numpy.array([0.87, 0.01, 4.80])}}}, \"energy\": { \"total\": { \"baseline\": {", "{ \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": {\"2009\":", "39.06682, 40.94604, 30.43499]), \"2010\": numpy.array([ 39.06682, 40.94604, 30.43499])}, \"efficient\": { \"2009\": numpy.array([ 26.04455,", 
"including all point values at terminal leaf nodes. ok_master_mseg_dist1 (dict): Sample measure master", "in 'measures_all' following competition and supply-demand overlap adjustments. measure_master_msegs_out_dist (dict): Master market microsegments", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775, \"2010\":", "10], [-100, 0, 1]] cls.ok_out = [5.14, 0.71, 6.5, 0, 999] def test_cashflow_paybacks(self):", "numpy.array( [5, 6, 7])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1},", "-145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370}}},", "\"rate 4\": -105, \"rate 5\": -110, \"rate 6\": -115, \"rate 7\": -120}, \"2010\":", "63.33550, \"2010\": 63.33550}, \"efficient\": {\"2009\": 42.22366, \"2010\": 42.22366}}, \"competed\": { \"baseline\": {\"2009\": 31.66775,", "{ \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 30, \"2010\": 30}}, \"competed\": {", "\"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13,", "= 7.5 cls.ok_ecostsave = 0.5 cls.ok_csave = 50 cls.ok_ccostsave = 1 cls.ok_out_array =", "fill value created below as a # substitute in the dict that has", "\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}}, \"lifetime\": { \"baseline\":", "{ \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"carbon cost\": { \"residential\": { \"2009\":", "\"2010\": 10}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([8.0, 7.5, 6.5])}}}, \"energy\": { \"total\":", "given 'ok_master_mseg_point' with a residential sample measure. 
ok_out_dist1 (dict): Measure attribute update status,", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 16.04455, \"2010\": 16.04455}, \"efficient\": {\"2009\": 8.022273, \"2010\":", "\"efficient\": { \"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\":", "10.05])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\":", "units. ok_base_life (int): Sample baseline technology lifetime. ok_product_lifetime (float): Sample measure lifetime. ok_life_ratio", "family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\":", "point value inputs. measures_secondary (list): Subset of 'measures_all' with secondary microsegments to adjust.", "numpy.array([184, 173, 169, 194, 149]), \"2010\": numpy.array([194, 205, 219, 289, 176])}, \"savings (annual)\":", "numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])}, \"cost savings (total)\": { \"2009\": numpy.array([4.9, 5.3, 6.3,", "\"rate 2\": -50, \"rate 3\": -55, \"rate 4\": -60, \"rate 5\": -65, \"rate", "\"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}}, \"cost\": { \"stock\": { \"total\": {", "\"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure c3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) # Verify test measure savings", "27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}}, \"competed\": { \"baseline\": { \"2009\":", "\"\"\" Tests for running the engine \"\"\" # Import code to be tested", "2.584709, 2.240438, 2.298386, 2.147181])}, \"irr (w/ energy and carbon costs)\": { \"2009\": numpy.array([", "test measure consumer metrics for ind, m in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = 
consumer_metrics_final_dist[ind] cls.measures_master_msegs_out", "6\": -115, \"rate 7\": -120}}}}] # Adjust/finalize point value test measure consumer metrics", "cls.ok_out_dist1 = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True,", "cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "sample_measure_com (object): Sample commercial measure data. test_adopt_scheme (string): Sample consumer adoption scheme. ok_rate", "\"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 30, \"2010\": 20}}, \"competed\":", "{ \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 1.11, \"2010\": 1.11}}}, \"energy\": {", "savings (annual)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7,", "10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\":", "\"2010\": numpy.array([ 39.06682, 40.94604, 30.43499])}, \"efficient\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\":", "numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07,", "demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across the", "\"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": {", "22.22366, 22.68455, 20.10668]), \"2010\": numpy.array([ 22.22366, 22.68455, 20.10668])}, \"efficient\": { \"2009\": numpy.array([ 11.11183,", "1}, \"measure\": 1}}] def test_compete_com(self): \"\"\"Test outcomes given sample measures w/ point value", "enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\":", "\"name\": \"sample compete measure r1 dist\", 
\"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\":", "cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.handyvars.retro_rate = 0 cls.test_adopt_scheme = \"Max adoption potential\" cls.adjust_key1", "{ cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\":", "\"adjusted energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (competed", "0.50, 0.25, 0.25, 0.25]), \"2010\": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])}, \"payback (w/ energy", "chain being tested. overlap_key_scnd (string): Second sample string for secondary market microsegment key", "20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array([15, 16, 17])}}, \"competed\": {", "\"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]),", "in enumerate(cls.a_run_dist.measures): m.consumer_metrics['anpv'] = consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\":", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([16, 15,", "10}, \"efficient\": { \"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": { \"2009\": 5,", "\"commercial\": { \"2009\": None, \"2010\": None}}}] # Adjust/finalize point value test measure consumer", "-0.4090909), \"rate 2\": numpy.pmt(1.0, 2, 0), \"rate 3\": numpy.pmt(0.45, 2, 0.1896552), \"rate 4\":", "-0.07946463]), \"2010\": numpy.array([ -0.047715000, -0.05520500, -0.09523954, -0.10215319, -0.13025120])}, \"ccc\": { \"2009\": numpy.array([ 3.6380e-08,", "\"rate 7\": -0.75}}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr", "# heating and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -435, \"rate 2\": -440,", 
"11.5, \"2010\": 11}}, \"competed\": { \"baseline\": {\"2009\": 11.5, \"2010\": 11}, \"efficient\": {\"2009\": 0,", "\"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 5}}}, \"cost\": { \"stock\":", "None}}, \"carbon cost\": { \"residential\": { \"2009\": -100, \"2010\": -100}, \"commercial\": { \"2009\":", "40}, \"efficient\": { \"2009\": numpy.array( [25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1,", "this structure, k and k2 are the keys that correspond to # the", "\"2010\": 15}, \"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": {\"2009\": -0.01602415,", "Sample results partitioning fraction. ok_out (dict): Sample partitioned measure results data. \"\"\" @classmethod", "measure master microsegment including energy, carbon, and energy/carbon cost arrays. ok_master_mseg_dist2 (dict): Sample", "{\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.03566667, 0.03566667, -0.01602415, -0.01602415,", "\"original energy (competed and captured)\": {}, \"adjusted energy (total captured)\": {}, \"adjusted energy", "{ \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([0.87, 0.01, 4.80]), \"2010\":", "\"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 20, \"2010\": 20}}, \"competed\":", "of 'measures_all_dist'. measures_supply_dist (list): Supply-side subset of 'measures_all_dist'. 
measures_overlap1_dist (dict): List of supply-side", "\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -200, \"2010\": -200},", "3\": -70, \"rate 4\": -380, \"rate 5\": -390, \"rate 6\": -150, \"rate 7\":", "{ \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-150, -200, -100])}, \"commercial\": { \"2009\": None,", "metrics consumer_metrics_dist = [{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "\"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}}, {", "\"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10},", "# Set information needed to finalize array test measure consumer # metrics consumer_metrics_final_dist", "supply and demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme, self.test_htcl_adj)", "\"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\":", "{ \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 30,", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44},", "8.65, 5.14])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]),", "data. 
adjust_key1 (string): First sample string for competed demand-side and supply-side market microsegment", "20}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10},", "10}, \"measure\": {\"2009\": 8.5, \"2010\": 6}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34,", "self.ok_out_dist2[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify test measure", "class TestMeasureInit(unittest.TestCase): \"\"\"Ensure that measure attributes are correctly initiated. Attributes: sample_measure (object): Residential", "-0.11206083])}, \"ccc\": { \"2009\": numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08,", "5)}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 1,", "\"2010\": 30}, \"Cooling\": {\"2009\": 35, \"2010\": 35}}, \"Commercial\": { \"Heating\": {\"2009\": 40, \"2010\":", "= str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))", "\"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure", "cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for yr in cls.handyvars.aeo_years}}, }} cls.compete_meas1 =", "service (CFL)\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": {", "{}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2),", "[\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\":", "{\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary mseg 
adjustments\": {", "\"rate 6\": numpy.pmt(0.065, 2, 0.4389671), \"rate 7\": -0.25}, \"2010\": { \"rate 1\": numpy.pmt(10.0,", "2.00, 4.54, 4.54, 5.00]), \"2010\": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])}, \"payback (w/ energy", "Test that valid inputs yield correct anpv, irr, payback, and # cost of", "tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x, y) for x, y in zip([ tested_data[\"key", "supply-side measures self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across the", "including some measures with array inputs. measures_secondary_dist (list): Subset of 'measures_all_dist' with secondary", "{ \"baseline\": { \"2009\": numpy.array([ 13.88650, 10.11489, 14.99037]), \"2010\": numpy.array([ 13.88650, 10.11489, 14.99037])},", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681,", "-0.27), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2, 0.5145794), numpy.pmt(0.07, 5, 2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07,", "engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # Verify test measure results update", "captured)\": {}, \"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3_dist =", "\"2009\": numpy.array([ -1.565543e-08, -2.450490e-08, -1.934271e-08, -1.897398e-08, -1.418052e-08]), \"2010\": numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08,", "1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\":", "\"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample compete measure c2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\":", "115, \"rate 4\": 120, \"rate 5\": 125, \"rate 6\": 10, \"rate 7\": 135}])}},", 
"\"2010\": 30}, \"measure\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}}, \"competed\": { \"all\":", "numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2,", "of a dict, the first item # in the tuple is the key", "20, \"2010\": 20}, \"efficient\": { \"2009\": 20, \"2010\": 15}}, \"competed\": { \"baseline\": {", "scaling\": 1}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): {", "and 'htcl_adj'. Verify that 'compete_res_primary' correctly calculates primary market shares and updates master", "\"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]),", "1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {", "-60, \"rate 3\": -70, \"rate 4\": -380, \"rate 5\": -390, \"rate 6\": -150,", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 17, \"2010\":", "10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array( [5, 6, 7])}}, \"competed\":", "{\"2009\": 0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "5}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": {", "Sample measure master microsegment including stock cost and measure lifetime array. 
ok_out_point_res (dict):", "0.5826397), \"rate 4\": numpy.pmt(0.25, 2, 0.72), \"rate 5\": numpy.pmt(0.15, 2, 0.8128544), \"rate 6\":", "20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}, \"efficient\": { \"2009\": numpy.array([ 20.82975, 15.17233,", "\"2009\": 3.45, \"2010\": 2.44}, \"irr (w/ energy and carbon costs)\": { \"2009\": 4.54,", "-0.0802776]), \"2010\": numpy.array([ -0.021500000, -0.021500000, -0.08611353, -0.08611353, -0.1247637])}, \"ccc\": { \"2009\": numpy.array([ 3.566667e-08,", "[\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\":", "\"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array( [20,", "5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": {", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.02, \"2010\": 8.02}}}, \"energy\": { \"total\": {", "5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1},", "measure # following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind],", "self.ok_ccostsave) # Test that valid inputs yield correct anpv, irr, payback, and #", "1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.pmt(0.07,", "\"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": numpy.array([0,", "16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5,", "2, 1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014),", "Analysis engine object incorporating all 'measures_primary' objects. 
measures_all_dist (list): List of competing measures", "= run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = CommonTestMeasures().sample_measure measure_instance = run.Measure(handyvars, **cls.sample_measure) cls.attribute_dict = measure_instance.__dict__", "including lists stock cost input values instead of point values. measures_all (list): List", "numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([1.00, 1.00, 3.45,", "[\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": \"supply\"}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room", "in the case where the dicts are not of identical size, # zip_longest()", "\"2010\": numpy.array([6.0, 6.5, 8.0])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 0.865895571,", "{ \"Heating\": {\"2009\": .30, \"2010\": .30}, \"Cooling\": {\"2009\": .35, \"2010\": .35}}, \"Commercial\": {", "{\"2009\": 6.943250, \"2010\": 6.943250}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, {", "31.66775, 32.01341, 30.08001])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 21.11183, 21.34227, 20.05334]), \"2010\":", "{ \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\": {", "common methods for use in all tests below.\"\"\" def dict_check(self, dict1, dict2): \"\"\"Check", "[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1',", "Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio metrics 
self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify test", "{\"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": numpy.array([22.22, 22.68, 20.11]), \"2010\": numpy.array([22.22, 22.68,", "portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[", "\"2010\": 100}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": {", "\"2009\": -100, \"2010\": -100}, \"commercial\": { \"2009\": None, \"2010\": None}}}] # Adjust/finalize point", "= CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max adoption potential' cls.ok_rate = 0.07 cls.ok_master_mseg_point = {", "\"2009\": numpy.array([1.11, 4.89, 0.01]), \"2010\": numpy.array([1.11, 4.89, 0.01])}}}, \"energy\": { \"total\": { \"baseline\":", "8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}}, \"competed\": { \"baseline\": { \"2009\":", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 8}}}, \"energy\": { \"total\": {", "assign it a sample 'uncompeted' # market ('ok_master_mseg_dist1'), the focus of this test", "[25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}}, \"lifetime\":", "\"rate 4\": 130, \"rate 5\": 140, \"rate 6\": 150, \"rate 7\": 160}}}, \"energy", "cost\": { \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\": { \"2009\": None, \"2010\":", "{ \"original energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"original energy", "21.7, 21.2, 22.5])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist2", "60.16002]), \"2010\": numpy.array([ 63.33550, 64.02682, 60.16002])}, \"efficient\": { \"2009\": 
numpy.array([ 42.22366, 42.68455, 40.10668]),", "market microsegments that should be generated for each Measure object in 'measures_all' following", "numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}, \"efficient\": { \"2009\": numpy.array([", "-55, \"rate 4\": -60, \"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75},", "numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\":", "\"baseline\": { \"2009\": 69, \"2010\": numpy.array([66, 66, 63])}, \"efficient\": { \"2009\": 46, \"2010\":", "\"2010\": 6}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\":", "0.25]), \"2010\": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])}, \"payback (w/ energy and carbon costs)\":", "1. compete_meas3_dist (dict): Alternative version of sample residential supply-side cooling measure 1 including", "1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([0, 0,", "cost array. ok_master_mseg_dist3 (dict): Sample measure master microsegment including measure lifetime array. 
ok_master_mseg_dist4", "2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5),", "Create an Engine instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Test", "{\"2009\": 10.55592, \"2010\": 10.55592}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 63.33550, \"2010\": 63.33550},", "\"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 1.73, \"2010\": 1.73}}, \"competed\":", "-0.02715319, -0.02355809])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.04898876, -0.05783823, -0.05267604,", "= ('substituted entry', 5.2) # In this structure, k and k2 are the", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": {", "'measures_all_dist'. measures_supply_dist (list): Supply-side subset of 'measures_all_dist'. measures_overlap1_dist (dict): List of supply-side Measure", "tests. Attributes: sample_measure (dict): Sample residential measure #1. sample_measure2 (dict): Sample residential measure", "\"2010\": .25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\": {\"2009\": .30, \"2010\": .30}, \"Cooling\":", "and supply-side market microsegment key chain being tested. adjust_key2 (string): Second sample string", "6.5, 8.0])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 51, \"2010\": numpy.array([36, 39,", "= run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to finalize array test measure consumer", "{ \"rate 1\": 50, \"rate 2\": 60, \"rate 3\": 70, \"rate 4\": 80,", "{\"2009\": 0, \"2010\": 50}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300},", "cash flows. 
ok_out (list): Outputs that should be generated for each set of", "cls.measures_all = [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary", "21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": { \"2009\": numpy.array(", "# Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify test", "6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\":", "\"Cooling\": {\"2009\": 15, \"2010\": 15}}, \"Commercial\": { \"Heating\": {\"2009\": 20, \"2010\": 20}, \"Cooling\":", "10, \"2010\": 10}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "90, \"rate 6\": 100, \"rate 7\": 110}, \"2010\": { \"rate 1\": 50, \"rate", "commercial supply-side lighting measure 1 including lists stock cost input values instead of", "ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the", "0.05350000, 0.05350000, -0.01111353, -0.01111353, -0.04976366])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([", "6, 7]), \"2010\": numpy.array([5, 6, 7])}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5},", "6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {", "{\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.09917355),", "\"rate 4\": -60, \"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75}}}}, {", "arrays. Attributes: handyvars (object): Useful variables across the class. 
sample_measure (object): Sample measure", "\"savings (annual)\": { \"2009\": numpy.array([94, 93, 99, 84, 99]), \"2010\": numpy.array([114, 105, 89,", "0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785,", "[\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\":", "self.ok_out_point_res[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test", "status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_com[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_com[1]) #", "20, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\":", "keys that overlap with 'measures_supply_dist' Measure objects. a_run_dist (object): Engine object incorporating all", "self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments for each", "and measure lifetime array. ok_out_point_res (dict): Measure attribute update status, savings, and portfolio/consumer-level", "5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ -0.01565543, -0.02450490, -0.01934271, -0.01897398, -0.01418052]),", "are not equal. 
\"\"\" # zip() and zip_longest() produce tuples for the items", "-4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}}, \"energy\": { \"savings (total)\":", "15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443,", "30}, \"efficient\": { \"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15,", "numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([", "\"2009\": numpy.array([6, 7, 1, 16, 1]), \"2010\": numpy.array([36, 45, 61, 5, 54])}}}, \"carbon\":", "7.5 cls.ok_ecostsave = 0.5 cls.ok_csave = 50 cls.ok_ccostsave = 1 cls.ok_out_array = [", "Sample measure master microsegment including energy, carbon, and energy/carbon cost arrays. ok_master_mseg_dist2 (dict):", "energy, carbon, and energy/carbon cost arrays. ok_master_mseg_dist2 (dict): Sample measure master microsegment including", "18])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}}, \"competed\": { \"baseline\": {", "adjustment\": { \"savings\": { cls.adjust_key2: { \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key2:", "with a residential sample measure. 
ok_out_dist1 (dict): Measure attribute update status, savings, and", "100}, \"cost savings (total)\": {\"2009\": 10, \"2010\": 15}, \"cost savings (annual)\": {\"2009\": 10,", "{ \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17, \"2010\": 12}}, \"competed\": {", "\"2010\": { \"rate 1\": -40, \"rate 2\": -50, \"rate 3\": -55, \"rate 4\":", "\"sample compete measure c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\":", "\"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": [\"electricity", "\"competed\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"carbon\":", "{ \"market share\": { \"original energy (total captured)\": {}, \"original energy (competed and", "{\"2009\": 46, \"2010\": 44}}, \"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\":", "0.1901141, 0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2 = [{ \"savings and portfolio metrics\": { \"Technical", "\"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\":", "20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([16.04, 17.30, 10.29]), \"2010\": numpy.array([16.04, 17.30, 10.29])}},", "6.824341, 5.072499])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 39.06682, 40.94604, 30.43499]),", "# market ('ok_master_mseg_dist4'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res)", "15}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60,", "master microsegments for each sample measure # following competition/supply-demand overlap adjustments for ind,", "\"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, 
\"efficient\": {\"2009\":", "2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": {", "measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify test measure consumer-level metrics", "\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": { \"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\":", "5}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": { \"total\":", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\":", "\"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -100,", "and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure5 = { \"name\": \"sample measure 5", "\"uncompeted\") # Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) # Verify", "\"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "\"2009\": numpy.array([ 0.865895571, 0.01085301, 6.722325]), \"2010\": numpy.array([ 0.865895571, 0.01085301, 6.722325])}}, \"competed\": { \"baseline\":", "\"2009\": numpy.array([ -0.01565543, -0.02450490, -0.01934271, -0.01897398, -0.01418052]), \"2010\": numpy.array([ -0.02466428, -0.02853592, -0.02023954, -0.02715319,", "self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure self.a_run_dist.secondary_adj(", "19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"cost\": { \"stock\":", "information needed to finalize array test measure consumer # metrics 
consumer_metrics_final_dist = [{", "test_compete_res(self): \"\"\"Test outcomes given valid sample measures w/ point value inputs.\"\"\" # Run", "0.3636364])}, \"payback (w/ energy and carbon costs)\": { \"2009\": numpy.array([ 0.1937984, 0.1879699, 0.1748252,", "Run secondary microsegment adjustments on sample measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) #", "14.40498233])}, \"efficient\": { \"2009\": numpy.array([ 1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}},", "costs)\": {\"2009\": numpy.array([ 1.941176, 4.555556, 5.647891, 5.501689, 4.543007]), \"2010\": numpy.array([ 4.882353, 7.108108, 6.327488,", "\"competed\": { \"baseline\": {\"2009\": 11.5, \"2010\": 11}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\":", "\"baseline\": {\"2009\": 30, \"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"])", "\"technology_type\": { \"primary\": \"supply\", \"secondary\": \"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"], \"markets\":", "\"2010\": 45}}}} def test_ok(self): \"\"\"Test for correct function output given valid inputs.\"\"\" dict1", "2, 0), \"rate 3\": numpy.pmt(0.45, 2, 0.1896552), \"rate 4\": numpy.pmt(0.25, 2, 0.3), \"rate", "5.11, 9.99]), \"2010\": numpy.array([8.89, 5.11, 9.99])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "\"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "(w/ energy and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_dist1 = [{", "\"competed\": { \"all\": {\"2009\": 
15, \"2010\": 15}, \"measure\": { \"2009\": 11.5, \"2010\": numpy.array([11.0,", "1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2, 0.5625), \"rate 3\": numpy.pmt(0.45, 2,", "5 for yr in cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for yr in", "15}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 45,", "List of demand-side Measure objects and associated contributing microsegment keys that overlap with", "the 'convert_to_numpy' function. Verify that the function converts terminal/leaf node lists in a", "\"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\":", "sample measure. ok_out_dist1 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics", "5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}}, { \"cce\": {", "\"rate 7\": -200}, \"2010\": { \"rate 1\": -190, \"rate 2\": -195, \"rate 3\":", "{ \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\": 34.5, \"2010\": 33}},", "21.3, 18.3, 18.8, 17.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2,", "[\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"windows\"], \"technology_type\": {\"primary\": \"demand\",", "15}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 90, \"2010\": 90}, \"efficient\": {\"2009\": 60,", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17, \"2010\": 12}}, \"competed\": { \"all\":", "# (for input uncertainty test cases) elif isinstance(i, numpy.ndarray): self.assertTrue(type(i) == type(i2)) for", "numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07, 2, -0.2169622), numpy.pmt(0.07,", "numpy.array([11, 11, 10.5])}}, \"competed\": { 
\"baseline\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])},", "Measure objects and associated contributing microsegment keys that overlap with 'measures_demand' Measure objects.", "10}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5,", "{\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 16}}, \"competed\": { \"all\": {\"2009\":", "\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -135, \"rate 2\":", "\"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}}, \"competed\": { \"baseline\":", "status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) #", "# Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def test_metrics_ok_distrib4(self): \"\"\"Test output", "{\"2009\": 17, \"2010\": 12}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "30, \"2010\": 30}, \"Cooling\": {\"2009\": 35, \"2010\": 35}}, \"Commercial\": { \"Heating\": {\"2009\": 40,", "across all class functions.\"\"\" base_dir = os.getcwd() handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure =", "\"savings\": { cls.adjust_key2: { \"2009\": 0, \"2010\": 0}}, \"total\": { cls.adjust_key2: { \"2009\":", "-0.01422262, -0.01238981, -0.01613170]), \"2010\": numpy.array([ -0.01145724, -0.01084246, -0.01014934, -0.007691022, -0.01262901])}, \"cce (w/ carbon", "'electricity (grid)', 'cooling', 'demand', 'windows', 'existing')) cls.adjust_key2 = str( ('primary', 'AIA_CZ1', 'single family", "\"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251, \"2010\": 1.670251}}, \"competed\": { \"baseline\":", "metrics\": 
False}, { \"stock\": { \"cost savings (total)\": {\"2009\": -5, \"2010\": -10}, \"cost", "1.73179114}, \"efficient\": { \"2009\": 0.865895571, \"2010\": 0.865895571}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\":", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.5, \"2010\": 6}}}, \"energy\": {", "\"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full service\", \"structure_type\": [\"new\",", "competed demand-side and supply-side market microsegment key chain being tested. adjust_key2 (string): Second", "numpy.array([ 8.446248, 11.795815, 6.327488, 10.343948, 7.801544])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.255,", "object incorporating all 'measures_all_dist' objects. measure_master_msegs_out (dict): Master market microsegments that should be", "savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"],", "{ \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 1, 0.4672897), numpy.pmt(0.07, 2,", "\"2010\": 10}, \"measure\": {\"2009\": 5, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "\"2009\": numpy.array([ 41.65950, 30.34466, 44.97110]), \"2010\": numpy.array([ 41.65950, 30.34466, 44.97110])}, \"efficient\": { \"2009\":", "\"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.4345794), \"2010\": numpy.pmt(0.07,", "and supply-demand overlap adjustments. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across", "\"2010\": numpy.array( [5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 30,", "\"baseline\": {\"2009\": 13.88650, \"2010\": 13.88650}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"carbon\": { \"total\":", "supply-side lighting measure 1 including lists stock cost input values instead of point", "63.33550, 64.02682, 60.16002]), \"2010\": numpy.array([ 63.33550, 64.02682, 60.16002])}, \"efficient\": { \"2009\": numpy.array([ 42.22366,", "-0.01306317, -0.01389378, -0.01422262, -0.01238981, -0.01613170]), \"2010\": numpy.array([ -0.01145724, -0.01084246, -0.01014934, -0.007691022, -0.01262901])}, \"cce", "{ \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": {", "\"baseline\": { \"2009\": 0, \"2010\": numpy.array([36, 30, 18])}, \"efficient\": { \"2009\": 0, \"2010\":", "0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\":", "\"2009\": 5, \"2010\": numpy.array([ 0, 1, 2])}}}, \"energy\": { \"total\": { \"baseline\": {", "\"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4090909), \"rate 2\": numpy.pmt(1.0, 2,", "\"name\": \"sample compete measure r3 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\":", "data types in measure markets attribute for adopt_scheme in self.handyvars.adopt_schemes: for comp_scheme in", "24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2, 22.5])}}}}, \"lifetime\": {", "# exploration of dict1 and dict2, respectively for (k, i), (k2, i2) in", "{ \"2009\": numpy.array([ 17.77300, 10.22977, 19.98073]), \"2010\": numpy.array([ 17.77300, 10.22977, 19.98073])}, \"efficient\": {", "\"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\":", "\"efficient\": {\"2009\": 0, \"2010\": 
0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\":", "captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas1_dist = { \"name\": \"sample compete measure r1", "15, \"2010\": 15}, \"measure\": {\"2009\": 15, \"2010\": 15}}}, \"energy\": { \"total\": { \"baseline\":", "\"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [5,", "\"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": {", "self.ok_out_point_res[3]) def test_metrics_ok_point_com(self): \"\"\"Test output given commercial measure with point value inputs.\"\"\" #", "11.34227, 10.05334])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([", "(w/ energy costs)\": {\"2009\": numpy.array([ 0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.1700000,", "\"2010\": 36}, \"efficient\": {\"2009\": 34, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\":", "cost benefits)\": { \"2009\": numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([ -2.15e-08,", "0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([", "numpy.array([ -2.466428e-08, -2.853592e-08, -2.023954e-08, -2.715319e-08, -2.355809e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\":", "0.75), \"rate 3\": numpy.pmt(0.45, 2, 1.165279), \"rate 4\": numpy.pmt(0.25, 2, 1.44), \"rate 5\":", "\"efficient\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}}, \"competed\":", "2\"]], [numpy.ndarray, int, float])])) # Offer external code execution (include all lines below", "Sample residential measure #1. sample_measure2 (dict): Sample residential measure #2. 
sample_measure3 (dict): Sample", "\"efficient\": { \"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}}, \"competed\":", "\"commercial\": { \"2009\": { \"rate 1\": -190, \"rate 2\": -195, \"rate 3\": -190,", "\"baseline\": { \"2009\": numpy.array([ 21.11183, 21.34227, 20.05334]), \"2010\": numpy.array([ 21.11183, 21.34227, 20.05334])}, \"efficient\":", "81, 11, 124])}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\":", "20.05334]), \"2010\": numpy.array([ 21.11183, 21.34227, 20.05334])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]),", "[\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": \"supply\"}, \"technology\": {\"primary\": [\"resistance heat\",", "\"2009\": numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08, 1.9795e-08, -2.023954e-08, -2.715319e-08,", "enumerate(self.ok_out_array): if x is not None: self.assertAlmostEqual(function_output[ind], x, places=2) else: self.assertEqual(function_output[ind], x) class", "incorporating all 'measures_all_dist' objects. measure_master_msegs_out (dict): Master market microsegments that should be generated", "{ \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"cost\": {", "test_numpy_convert(self): \"\"\"Test for correct function output given valid input.\"\"\" # Instantiate measure measure_instance", "residential measure #1. sample_measure2 (dict): Sample residential measure #2. 
sample_measure3 (dict): Sample commercial", "\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 1.73179114,", "the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] =", "{\"2009\": 41.65950, \"2010\": 41.65950}, \"efficient\": {\"2009\": 27.77300, \"2010\": 27.77300}}, \"competed\": { \"baseline\": {\"2009\":", "function. Verify that cashflow inputs generate expected prioritization metric outputs. Attributes: handyvars (object):", "44}}, \"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}},", "market microsegment key chain being tested. compete_meas1 (dict): Sample residential demand-side cooling measure", "{\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]), \"2010\":", "the second item is the value; # in the case where the dicts", "\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\":", "(w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\":", "and values\": {}, \"competed choice parameters\": {}, \"secondary mseg adjustments\": { \"market share\":", "8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 51,", "Sample residential demand-side cooling measure 2. 
compete_meas3 (dict): Sample residential supply-side cooling measure", "numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]),", "{\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.04958678),", "costs)\": {\"2009\": numpy.array([ 0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.17, 0.1233333, 0.1488889,", "(k2, i2) in itertools.zip_longest(sorted(dict1.items()), sorted(dict2.items()), fillvalue=fill_val): # Confirm that at the current location", "\"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}]", "numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\":", "\"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\":", "Sample residential supply-side cooling measure 2. compete_meas5 (dict): Sample residential supply-side cooling measure", "(object): Sample residential measure data. sample_measure_com (object): Sample commercial measure data. test_adopt_scheme (string):", "building type, structure type). compete_meas1 (dict): Sample commercial supply-side lighting measure 1. 
compete_meas2", "def test_metrics_ok_point_com(self): \"\"\"Test output given commercial measure with point value inputs.\"\"\" # Initialize", "on sample supply-side measures self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps", "{ \"total\": { \"baseline\": {\"2009\": 22.22366, \"2010\": 22.22366}, \"efficient\": {\"2009\": 11.11183, \"2010\": 11.11183}},", "{ \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])},", "m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist = [run.Measure( cls.handyvars, **x) for x", "\"baseline\": { \"2009\": 5, \"2010\": numpy.array([8.0, 7.5, 6.5])}, \"efficient\": { \"2009\": 10, \"2010\":", "Master market microsegments that should be generated for each Measure object in 'measures_all'", "{ \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}, \"carbon\": { \"total\":", "105.1, 105, 106.1])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\":", "-0.01145724, -0.01084246, -0.01014934, -0.007691022, -0.01262901])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([", "\"2010\": 300}, \"efficient\": {\"2009\": 50, \"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\":", "9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"cost\": { \"stock\": {", "copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]] # Instantiate engine object based on above measures cls.a_run", "9, 9.1])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": numpy.array([8.0, 7.5, 6.5])}, \"efficient\":", "cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))],", "20}, \"measure\": {\"2009\": 16.04, \"2010\": 16.04}}, \"competed\": { \"all\": {\"2009\": 10, 
\"2010\": 10},", "numpy.array([ 1.670251, 7.32767, 0.01445051]), \"2010\": numpy.array([ 1.670251, 7.32767, 0.01445051])}, \"efficient\": { \"2009\": numpy.array([", "subset of 'measures_all_dist'. measures_overlap1_dist (dict): List of supply-side Measure objects and associated contributing", "self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, self.ok_csave, self.ok_ccostsave) # Test that valid", "\"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}}, \"carbon\": { \"savings (total)\": { \"2009\": numpy.array([149.4,", "sample supply-side measures self.a_run_dist.compete_res_primary( self.measures_supply_dist, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across", "160}, \"2010\": { \"rate 1\": 100, \"rate 2\": 110, \"rate 3\": 120, \"rate", "all([isinstance(x, y) for x, y in zip([ tested_data[\"key 1\"][\"nested key 1\"], tested_data[\"key 1\"][\"nested", "20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "(int): Sample baseline->measure stock cost delta. ok_esave (int): Sample measure energy savings. 
ok_ecostsave", "13.88650, 10.11489, 14.99037])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250,", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 22.22366, 22.68455, 20.10668]), \"2010\": numpy.array([ 22.22366, 22.68455,", "20.5]) }}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\":", "\"2009\": numpy.pmt(0.07, 2, 0.4345794), \"2010\": numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\": {\"2009\": None, \"2010\": None}},", "\"measure\": {\"2009\": 17.77, \"2010\": 17.77}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[", "\"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 46, \"2010\":", "\"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr (w/ energy and", "class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\", \"2010\"]", "4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0, 0.001808835, 1.920664]),", "calculates primary market shares and updates master microsegments for a series of competing", "\"measure\": {\"2009\": 15, \"2010\": 15}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\":", "89, 145, 96])}, \"cost savings (total)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]),", "\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 85, \"rate 2\":", "\"measure\": 2}} cls.ok_master_mseg_dist1 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "tested_data[\"key 1\"][\"nested key 2\"], tested_data[\"key 
2\"]], [numpy.ndarray, int, float])])) # Offer external code", "the output for the test run of the 'metric_update' # function function_output =", "the normal output from zip_longest() fill_val = ('substituted entry', 5.2) # In this", "0.01]), \"2010\": numpy.array([1.11, 4.89, 0.01])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.sample_measure = { \"market_entry_year\": None, \"market_exit_year\": None, \"markets\":", "self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across the supply and demand sides", "2, 3, 4, 5], \"nested key 2\": 5}, \"key 2\": 10.8}, \"Max adoption", "0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5),", "run.Engine(handyvars, measure_list) cls.ok_total = {\"2009\": 100, \"2010\": 100} cls.ok_partitions = { \"AIA CZ1\":", "for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test", "{ \"name\": \"sample compete measure r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\":", "8.89}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975,", "5\": 140, \"rate 6\": 150, \"rate 7\": 160}, \"2010\": { \"rate 1\": 100,", "# value is given as a tuple to be of comparable structure #", "{ \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array(", "cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2] cls.measures_supply = cls.measures_all[2:5] cls.measures_overlap1 = { \"measures\":", "= [\"competed\", 
\"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test output given residential measure with point value", "1}}] def test_compete_com(self): \"\"\"Test outcomes given sample measures w/ point value inputs.\"\"\" #", "\"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\": { \"total\":", "1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key1: { \"stock\": {", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([1.73, 0.02, 9.60]), \"2010\": numpy.array([1.73,", "\"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\":", "6\": -150, \"rate 7\": -400}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\":", "above measures cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize array", "\"2010\": numpy.array([44, 44, 42])}, \"efficient\": { \"2009\": 34.5, \"2010\": numpy.array([33, 33, 31.5])}}, \"competed\":", "0.027285, 0.019795, -0.02023954, -0.02715319, -0.05525120])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([", "cash flows. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class", "{ \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\":", "cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max adoption potential' cls.ok_rate =", "149]), \"2010\": numpy.array([194, 205, 219, 289, 176])}, \"savings (annual)\": { \"2009\": numpy.array([94, 93,", "ind, x in enumerate(self.ok_out_array): if x is not None: self.assertAlmostEqual(function_output[ind], x, places=2) else:", "(competed and captured)\": { cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2_dist", "measures_secondary_dist (list): Subset of 'measures_all_dist' with secondary microsegments to adjust. a_run_dist (object): Analysis", "{\"2009\": 0, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\":", "update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1])", "\"2010\": numpy.array([24, 26, 32])}}, \"competed\": { \"baseline\": { \"2009\": 25.5, \"2010\": numpy.array([18.0, 19.5,", "34, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5,", "numpy.array([ 17.77300, 10.22977, 19.98073]), \"2010\": numpy.array([ 17.77300, 10.22977, 19.98073])}, \"efficient\": { \"2009\": numpy.array([", "{\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"cost\": { \"stock\": {", "subset of 'measures_all'. measures_supply (list): Supply-side subset of 'measures_all'. 
measures_overlap1 (dict): List of", "{ \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341, \"2010\": 19.53341}}, \"competed\": {", "based on above measures cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to", "a_run (object): Sample analysis engine object. ok_total (dict): Sample unpartitioned measure results data.", "20.47302, 15.21750])}, \"efficient\": { \"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341,", "handyvars (object): Useful variables across the class. sample_measure (object): Sample measure data with", "9.60332155])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336,", "0.67, 0.33, 0.33, 0.33])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([0.33, 0.33,", "k2) # If the recursion has not yet reached the terminal/leaf node if", "6.511136}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 16.04455, \"2010\": 16.04455}, \"efficient\":", "176])}, \"savings (annual)\": { \"2009\": numpy.array([94, 93, 99, 84, 99]), \"2010\": numpy.array([114, 105,", "= os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.handyvars.retro_rate = 0", "6.511136, 6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "{\"2009\": numpy.array([ 1.941176, 4.555556, 5.647891, 5.501689, 4.543007]), \"2010\": numpy.array([ 4.882353, 7.108108, 6.327488, 10.343948,", "that should be generated given 'ok_master_mseg_dist3' with a residential sample measure. 
ok_out_dist4 (dict):", "\"rate 3\": numpy.pmt(0.45, 2, 0.1896552), \"rate 4\": numpy.pmt(0.25, 2, 0.3), \"rate 5\": numpy.pmt(0.15,", "= 0.07 cls.ok_master_mseg_point = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "the supply and demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme,", "energy and carbon costs)\": {\"2009\": numpy.array([ 0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([", "{ \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]),", "\"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\": { \"total\":", "\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -190, \"rate 2\":", "'lighting', 'reflector (LED)', 'existing')) cls.overlap_key_scnd = str( ('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'cooling',", "\"payback (w/ energy costs)\": { \"2009\": 0.25, \"2010\": 0.33}, \"payback (w/ energy and", "measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[", "1, 5, 7, 8], [-10, 14, 2, 3, 4], [-10, 0, 1, 2],", "numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2,", "None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([-150, -200, -100]), \"2010\": numpy.array([-150, -200,", "ok_master_mseg_dist4 (dict): Sample measure master microsegment including stock cost and measure lifetime array.", "\"2010\": 15}, \"efficient\": { \"2009\": numpy.array( [15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1,", "3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08, 1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])}, \"ccc", "11}}}, \"carbon\": { 
\"total\": { \"baseline\": {\"2009\": 69, \"2010\": 66}, \"efficient\": {\"2009\": 46,", "\"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "\"2010\": 1.670251}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"cost\": { \"stock\": { \"total\": {", "\"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\":", "microsegment adjustments on sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated", "10}, \"measure\": {\"2009\": 0, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40,", "\"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\":", "\"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\":", "\"2010\": 1.73179114}}, \"competed\": { \"baseline\": { \"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": { \"2009\":", "measure->baseline lifetime ratio. ok_base_scost (int): Sample baseline stock cost. ok_scostsave (int): Sample baseline->measure", "\"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 17.77300, 10.22977, 19.98073]),", "of the 'metrics_update' function. 
Verify that cashflow inputs generate expected prioritization metric outputs.", "\"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ -0.04898876, -0.05783823, -0.05267604, -0.05230731, -0.04751385]),", "\"name\": \"sample measure 4\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\":", "\"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\":", "{\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009,", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 20}}, \"competed\": { \"all\":", "\"baseline\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}, \"efficient\":", "measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify test measure consumer-level metrics", "numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07, 2, 1.699537), numpy.pmt(0.07, 2, 1.582016)]) },", "for correct data types in measure markets attribute for adopt_scheme in self.handyvars.adopt_schemes: for", "= [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {", "ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for x", "# Test that the dicts from the current keys are equal self.assertCountEqual(i, i2)", "{ \"2009\": numpy.pmt(0.07, 2, 1.808018), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\":", "\"carbon cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07, 2, 0.9582496),", "15, \"2010\": 
15}, \"measure\": { \"2009\": numpy.array([11.11, 11.34, 10.05]), \"2010\": numpy.array([11.11, 11.34, 10.05])}}},", "{ \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 40, \"2010\": 30}},", "# At the terminal/leaf node, formatted as a numpy array # (for input", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.02, 8.65, 5.14]), \"2010\":", "sample cash flows. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all", "ok_master_mseg_dist2 (dict): Sample measure master microsegment including stock cost array. ok_master_mseg_dist3 (dict): Sample", "('ok_master_mseg_dist1'), the focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"]", "Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist3[0]) # Verify test measure", "\"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -150, \"2010\": -150}, \"commercial\": {", "{\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251, \"2010\": 1.670251}}, \"competed\": { \"baseline\": {\"2009\":", "{ \"baseline\": { \"2009\": 25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5,", "\"name\": \"sample compete measure r1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\":", "0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 2, 0.4259346)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\":", "for x in [ cls.compete_meas1, copy.deepcopy(cls.compete_meas2), cls.compete_meas3, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand = cls.measures_all[0:2] cls.measures_supply", "\"2010\": 12}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.5, \"2010\":", "cooling 
measure 1. compete_meas3_dist (dict): Alternative version of sample residential supply-side cooling measure", "numpy.array([1.73, 0.02, 9.60])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\":", "finalize array test measure consumer # metrics consumer_metrics = [{ \"stock cost\": {", "of the recursive # exploration of dict1 and dict2, respectively for (k, i),", "the dicts from the current keys are equal self.assertCountEqual(i, i2) # Continue to", "that 'compete_res_primary' correctly calculates primary market shares and updates master microsegments for a", "\"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP',", "7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "list engine_instance = run.Engine(self.handyvars, self.measure_list) # Record the output for the test run", "0.1521739), \"rate 6\": numpy.pmt(0.065, 2, 0.2042254), \"rate 7\": -0.125}}}, \"energy cost\": { \"residential\":", "in [\"uncompeted\", \"competed\"]: tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x, y) for x, y", "distribution\": {}}}, \"secondary mseg adjustments\": { \"market share\": { \"original energy (total captured)\":", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 15, \"2010\":", "{ \"baseline\": {\"2009\": 39.06682, \"2010\": 39.06682}, \"efficient\": {\"2009\": 26.04455, \"2010\": 26.04455}}, \"competed\": {", "\"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\":", "Measure objects. 
measures_overlap2_dist (dict): List of demand-side Measure objects and associated contributing microsegment", "{ \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}, \"efficient\": { \"2009\": 34.5, \"2010\": numpy.array([33,", "\"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}, \"competed\":", "{\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([6, 7, 1, 16, 1]), \"2010\":", "'multi family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\": { \"total\": {", "{ \"2009\": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2,", "Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify test measure portfolio-level financial", "{ \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice", "None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -100, \"2010\": -100}, \"commercial\":", "\"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}}, \"carbon\":", "{ \"baseline\": {\"2009\": 21.11183, \"2010\": 21.11183}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"carbon\": {", "4.54, \"2010\": 4.09}, \"payback (w/ energy costs)\": { \"2009\": 0.25, \"2010\": 0.33}, \"payback", "object in 'measures_all_dist' following competition and supply-demand overlap adjustments. \"\"\" @classmethod def setUpClass(cls):", "17.29736, 10.29000])}, \"efficient\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681,", "\"efficient\": { \"2009\": numpy.array([ 0, 0.001808835, 1.920664]), \"2010\": numpy.array([ 0, 0.001808835, 1.920664])}}}, \"energy\":", "self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase): \"\"\"Test the operation of the 'payback' function. 
Verify cashflow", "\"baseline\": {\"2009\": 11.11183, \"2010\": 11.11183}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "-0.01238981, -0.01613170]), \"2010\": numpy.array([ -0.01145724, -0.01084246, -0.01014934, -0.007691022, -0.01262901])}, \"cce (w/ carbon cost", "class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure cls.measure_list", "2, 0.9040091), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}}, \"irr (w/", "6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}, \"cost savings (annual)\": {", "\"competed\": { \"baseline\": {\"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"cost\":", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([1.73, 0.02, 9.60]), \"2010\":", "for Engine including one sample residential measure. 
ok_cashflows (list): Set of sample input", "numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]), \"2010\": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])}, \"irr (w/", "base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars,", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": {", "\"2010\": 5}, \"measure\": {\"2009\": 0, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "energy costs)\": {\"2009\": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]), \"2010\": numpy.array([0.67, 0.67, 0.33, 0.33,", "\"2010\": 1.29884336}, \"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"cost\": { \"stock\": { \"total\": {", "10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}, \"competed\": { \"all\": {\"2009\": 5,", "20, 12])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}}, \"competed\": { \"baseline\":", "\"baseline\": { \"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\":", "\"2010\": { \"rate 1\": -135, \"rate 2\": -140, \"rate 3\": -145, \"rate 4\":", "{ \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}, \"competed\": {", "{\"2009\": .20, \"2010\": .20}, \"Cooling\": {\"2009\": .25, \"2010\": .25}}}, \"AIA CZ2\": { \"Residential\":", "cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0,", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -90, \"rate 2\": -95, \"rate", "zip_longest() fill_val = ('substituted entry', 5.2) # In this structure, k and k2", "32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "191.3, 194.9, 195.0, 193.9])}, \"savings (annual)\": { \"2009\": 
numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]),", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20},", "valid inputs.\"\"\" # Create an Engine instance using sample_measure list engine_instance = run.Engine(self.handyvars,", "1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018)]),", "\"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}, \"cost", "6, 7])}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array([0,", "{ \"name\": \"sample compete measure c2 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": {", "test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1 # Create Engine instance using test measure, run function", "for ind, x in enumerate(self.ok_out_array): if x is not None: self.assertAlmostEqual(function_output[ind], x, places=2)", "2, -0.4318182), \"rate 2\": numpy.pmt(1.0, 2, -0.125), \"rate 3\": numpy.pmt(0.45, 2, 0.01724138), \"rate", "{ \"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15},", "\"2010\": 16.04}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.02, \"2010\":", "\"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\":", "\"commercial\": { \"2009\": { \"rate 1\": -435, \"rate 2\": -440, \"rate 3\": -145,", "costs)\": {\"2009\": numpy.array([ 0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.34, 0.2466667, 0.2233333,", "\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, cls.overlap_key_scnd: { \"stock\":", "{ \"2009\": numpy.pmt(0.07, 2, 0.4345794), \"2010\": numpy.pmt(0.07, 2, 0.2009346)}, \"commercial\": {\"2009\": None, \"2010\":", "5\": 115, \"rate 6\": 120, \"rate 
7\": 125}, { \"rate 1\": 105, \"rate", "60, \"2010\": 60}}, \"competed\": { \"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15,", "0.865895571, \"2010\": 0.865895571}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": {", "measure 1 including lists stock cost input values instead of point values. measures_all", "\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\" base_dir", "1.920664]), \"2010\": numpy.array([ 0, 0.001808835, 1.920664])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "dict1 (dict): First dictionary to be compared dict2 (dict): Second dictionary to be", "6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"cost\": { \"stock\": { \"total\":", "13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6, 6.5, 8])}}, \"competed\": { \"baseline\":", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\":", "\"2010\": 6.943250}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 41.65950, \"2010\": 41.65950}, \"efficient\": {\"2009\":", "\"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -370}, \"2010\":", "# Import code to be tested import run # Import needed packages import", "point values at terminal leaf nodes. 
ok_master_mseg_dist1 (dict): Sample measure master microsegment including", "{\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"cost\": { \"stock\": {", "-0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary mseg adjustments\": { \"market share\": {", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 20}},", "\"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}, \"cost\":", "given valid inputs.\"\"\" # Create an Engine instance using sample_measure list engine_instance =", "\"rate 6\": -150, \"rate 7\": -400}, \"2010\": { \"rate 1\": -350, \"rate 2\":", "operation of the 'convert_to_numpy' function. Verify that the function converts terminal/leaf node lists", "yield expected savings and financial metrics outputs. Attributes: handyvars (object): Useful variables across", "{\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}, \"competed\": { \"baseline\": {\"2009\":", "sample supply-side measures self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across", "string for competed demand-side and supply-side market microsegment key chain being tested. 
adjust_key2", "\"2010\": numpy.array( [5, 6, 7])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "are equal; this should fail if one of the dicts # is empty,", "-5, \"2010\": -10}}, \"energy\": { \"savings (total)\": { \"2009\": numpy.array([184, 173, 169, 194,", "numpy.array([ 31.66775, 32.01341, 30.08001])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([", "\"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\":", "{\"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\":", "\"2010\": 8.022273}}, \"competed\": { \"baseline\": {\"2009\": 8.022273, \"2010\": 8.022273}, \"efficient\": {\"2009\": 0, \"2010\":", "10.11489, 14.99037])}, \"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443,", "{ yr: 5 for yr in cls.handyvars.aeo_years}}, }, \"demand\": { \"['AIA_CZ1', 'single family", "{ \"stock cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.4345794), \"2010\": numpy.pmt(0.07, 2,", "\"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}}, \"competed\":", "0.019260, -0.01934271, -0.01897398, -0.04613129]), \"2010\": numpy.array([ 0.027285, 0.019795, -0.02023954, -0.02715319, -0.05525120])}, \"cce (w/", "\"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\":", "\"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\":", "measure c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\":", "\"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": 
None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\":", "\"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]),", "-0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 5, 2.887211)]),", "\"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\":", "6.943250}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17.77300, \"2010\": 17.77300}, \"efficient\":", "self.ok_esave, self.ok_ecostsave, self.ok_csave, self.ok_ccostsave) # Test that valid inputs yield correct anpv, irr,", "93, 99, 84, 99]), \"2010\": numpy.array([114, 105, 89, 145, 96])}, \"cost savings (total)\":", "10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\":", "or unitary values that are found in i and i2, # respectively, at", "\"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\":", "\"2010\": numpy.array([-150, -200, -100])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": {", "\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [5, 6, 7])}},", "None}, \"commercial\": { \"2009\": { \"rate 1\": -135, \"rate 2\": -140, \"rate 3\":", "110, \"rate 3\": 115, \"rate 4\": 120, \"rate 5\": 125, \"rate 6\": 10,", "{ \"baseline\": { \"2009\": numpy.array([ 3.340502, 14.65534, 0.02890102]), \"2010\": numpy.array([ 3.340502, 14.65534, 0.02890102])},", "energy or carbon market/savings value. Attributes: a_run (object): Sample analysis engine object. ok_total", "overlap with 'measures_supply_dist' Measure objects. 
a_run_dist (object): Engine object incorporating all 'measures_all_dist' objects.", "{\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "-150, \"rate 7\": -400}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "Engine instance using sample_measure list engine_instance = run.Engine(self.handyvars, self.measure_list) # Record the output", "\"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\":", "\"efficient\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}}, \"competed\":", "30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}},", "and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_point_com = [{ \"savings and", "to numpy arrays. Attributes: handyvars (object): Useful variables across the class. 
sample_measure (object):", "2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07,", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.02, \"2010\": 8.02}}}, \"energy\": { \"total\":", "{\"2009\": -5, \"2010\": -10}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings", "\"2010\": 1.29884336}}, \"competed\": { \"baseline\": { \"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": { \"2009\":", "cls.ok_base_life = 3 cls.ok_product_lifetime = 6.2 cls.ok_life_ratio = 2 cls.ok_base_scost = 1 cls.ok_meas_sdelt", "\"2010\": 11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46, \"2010\": 44}, \"efficient\": {\"2009\":", "2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251, \"2010\": 1.670251}}, \"competed\": { \"baseline\": {\"2009\": 1.113501,", "\"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array([15, 16, 17])}}, \"competed\": { \"baseline\":", "10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}, \"competed\": {", "benefits)\": { \"2009\": numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([ -2.15e-08, -2.15e-08,", "17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}},", "sample residential measure. ok_num_units (int): Sample number of competed units. 
ok_base_life (int): Sample", "for idx, cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx], places=2) class ResCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_res_primary,'", "\"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 20.82975, 15.17233,", "6.327488, 10.343948, 8.181351])}, \"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.51, 0.2700000, 0.2050000, 0.21,", "\"efficient\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}}, \"competed\":", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626,", "125, \"rate 6\": 10, \"rate 7\": 135}])}}, \"energy cost\": { \"residential\": { \"2009\":", "family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home',", "\"competed\": { \"baseline\": {\"2009\": 21.11183, \"2010\": 21.11183}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"carbon\":", "{ \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -135, \"rate", "20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([17.77, 10.23, 19.98]), \"2010\": numpy.array([17.77, 10.23, 19.98])}},", "cls.handyvars.retro_rate = 0 cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.test_adopt_scheme = \"Max adoption potential\" cls.overlap_key", "\"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}}, \"lifetime\": {", "{\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 15, \"2010\": 15}}}, \"energy\": { \"total\": {", "and captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"adjusted energy (total captured)\": {", "\"rate 6\": numpy.pmt(0.065, 2, 1.820626), \"rate 7\": -1}, \"2010\": { \"rate 1\": numpy.pmt(10.0,", "# Run the 
measure competition routine on sample supply-side measures self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2,", "given 'ok_master_mseg_dist2' with a residential sample measure. ok_out_dist3 (dict): Measure attribute update status,", "{\"2009\": 0, \"2010\": 8}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 0, \"2010\": 24},", "array. ok_master_mseg_dist4 (dict): Sample measure master microsegment including stock cost and measure lifetime", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 16}},", "'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing')) cls.adjust_key2 = str( ('primary',", "ok_ccostsave (int): Sample measure avoided carbon costs. ok_out_dicts (list): Output annuity equivalent Net", "'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2", "46, \"2010\": 44}}, \"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5,", "{ \"stock\": { \"cost savings (total)\": {\"2009\": -5, \"2010\": -10}, \"cost savings (annual)\":", "of demand-side Measure objects and associated contributing microsegment keys that overlap with 'measures_supply_dist'", "empty, is missing section(s), or has different key names self.assertEqual(k, k2) # If", "-4.2, -5.5])}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\":", "energy and carbon costs)\": { \"2009\": 4.54, \"2010\": 4.09}, \"payback (w/ energy costs)\":", "-65, \"rate 6\": -70, \"rate 7\": -75}}}}, { \"stock cost\": { \"residential\": {", "normal output from zip_longest() fill_val = ('substituted entry', 5.2) # In this structure,", "\"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array([20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15,", "yr: 5 for yr in cls.handyvars.aeo_years}, \"affected savings\": { yr: 5 for yr", 
"of sample commercial supply-side lighting measure 1 including lists stock cost input values", "{ \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 30, \"2010\": 30}},", "\"rate 7\": 115}, \"2010\": { \"rate 1\": 85, \"rate 2\": 90, \"rate 3\":", "15}, \"efficient\": { \"2009\": 15, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\":", ".45, \"2010\": .45}}}} cls.ok_out = { \"AIA CZ1\": { \"Residential\": { \"Heating\": {\"2009\":", "100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": {\"2009\": 50, \"2010\": 100}}},", "10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5,", "\"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\":", "2, 0.5826397), \"rate 4\": numpy.pmt(0.25, 2, 0.72), \"rate 5\": numpy.pmt(0.15, 2, 0.8128544), \"rate", "\"rate 1\": -40, \"rate 2\": -50, \"rate 3\": -55, \"rate 4\": -60, \"rate", "4.218185, 3.081800]), \"2010\": numpy.array([ 5.345834, 7.580577, 3.931585, 6.612039, 4.915578])}, \"irr (w/ energy and", "\"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array([15, 16, 17])}}, \"competed\":", "[\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector (LED)\"], \"technology_type\":", "{ \"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\": { \"2009\": 120,", "\"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"F32T8\"], \"secondary\": None}, \"markets\": { \"Technical potential\": {", "\"cost savings (annual)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7,", "1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}}, \"competed\": { \"baseline\": { \"2009\":", "cls.handyvars = run.UsefulVars(base_dir, 
run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows =", "# In this structure, k and k2 are the keys that correspond to", "\"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([-5.1, -2.7,", "0.1, 0.1, 0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}},", "5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": numpy.array([ 0, 1, 2])}}}, \"energy\":", "the class. sample_measure (object): Sample measure data with lists to convert. \"\"\" @classmethod", "{ \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 0, \"2010\":", "node if isinstance(i, dict): # Test that the dicts from the current keys", "\"2010\": numpy.array([12, 10, 6])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"carbon\":", "this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3 # Create", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 40, \"2010\":", "Set information needed to finalize array test measure consumer # metrics consumer_metrics =", "20.5]) }}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array(", "Present Value dicts that should be generated given valid sample inputs. ok_out_array (list):", "\"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"cost\": { \"stock\":", "and financial metrics outputs. Attributes: handyvars (object): Useful variables across the class. 
sample_measure_res", "205, 219, 289, 176])}, \"savings (annual)\": { \"2009\": numpy.array([94, 93, 99, 84, 99]),", "\"2009\": 0, \"2010\": numpy.array([36, 30, 18])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([24, 20,", "20, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10,", "\"2010\": numpy.array([ 4.601286, 4.897553, 4.260683, 4.367373, 4.089454])}, \"payback (w/ energy costs)\": { \"2009\":", "\"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1,", "measure consumer # metrics consumer_metrics_final_dist = [{ \"stock cost\": { \"residential\": { \"2009\":", "\"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\": {\"2009\": 100, \"2010\":", "\"baseline\": { \"2009\": numpy.array([ 41.65950, 30.34466, 44.97110]), \"2010\": numpy.array([ 41.65950, 30.34466, 44.97110])}, \"efficient\":", "be generated given 'ok_master_mseg_dist4' with a residential sample measure. 
\"\"\" @classmethod def setUpClass(cls):", "\"2009\": numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]), \"2010\": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])}, \"cost", "5, 3])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([36, 30,", "6\": -160, \"rate 7\": -370}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\":", "test run of the 'metric_update' # function function_output = engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime),", "{ \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "\"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.overlap_key: { \"stock\":", "\"market_scaling_fractions_source\": None, \"measure_type\": \"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"],", "\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\":", "42])}}, \"competed\": { \"baseline\": { \"2009\": 34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": {", "{ \"cce\": { \"2009\": numpy.array([ 0.036380, 0.019260, -0.01934271, -0.01897398, -0.04613129]), \"2010\": numpy.array([ 0.027285,", "measure competition routine on sample demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) # Remove", "\"efficient\": { \"2009\": numpy.array( [0, 1, 2]), \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\":", "20.11]), \"2010\": numpy.array([22.22, 22.68, 20.11])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\":", "[5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": {", "measure measure_instance = run.Measure(self.handyvars, **self.sample_measure) # Test for correct data types in measure", "# Verify test measure 
savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify test measure portfolio-level", "\"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"cost\":", "(float): Sample measure lifetime. ok_life_ratio (int): Sample measure->baseline lifetime ratio. ok_base_scost (int): Sample", "cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07, 2, 2.043061), numpy.pmt(0.07,", "10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "in the dict structure, # the keys are equal; this should fail if", "\"efficient\": {\"2009\": 0.432947785, \"2010\": 0.432947785}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "20}, \"efficient\": { \"2009\": numpy.array( [15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}},", "26, 32])}}, \"competed\": { \"baseline\": { \"2009\": 25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\":", "that the function converts terminal/leaf node lists in a dict to numpy arrays.", "{ \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.09917355), \"rate 2\": numpy.pmt(1.0, 2, 0.75),", "in i and i2, # respectively, at the current level of the recursive", "a_run (object): Analysis engine object incorporating all 'measures_primary' objects. measures_all_dist (list): List of", "\"rate 4\": numpy.pmt(0.25, 2, 1.44), \"rate 5\": numpy.pmt(0.15, 2, 1.625709), \"rate 6\": numpy.pmt(0.065,", "object incorporating all 'measures_primary_dist' objects. 
measures_overlap (dict): List of supply-side Measure objects and", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -435, \"rate 2\": -440, \"rate", "(dict): Sample measure master microsegment including energy, carbon, and energy/carbon cost arrays. ok_master_mseg_dist2", "that valid input cashflows yield correct output payback values for idx, cf in", "keys that correspond to # the dicts or unitary values that are found", "\"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\":", "two dicts. Args: dict1 (dict): First dictionary to be compared dict2 (dict): Second", "measure_list = [run.Measure(handyvars, **sample_measure)] cls.a_run = run.Engine(handyvars, measure_list) cls.ok_total = {\"2009\": 100, \"2010\":", "= 0 cls.test_adopt_scheme = \"Max adoption potential\" cls.adjust_key1 = str( ('primary', 'AIA_CZ1', 'single", "residential measures; and that 'htcl_adj' properly accounts for heating and cooling supply-demand overlaps.", "{ \"2009\": 34.5, \"2010\": numpy.array([33, 33, 31.5])}}, \"competed\": { \"baseline\": { \"2009\": 23,", "-90, \"rate 2\": -95, \"rate 3\": -100, \"rate 4\": -105, \"rate 5\": -110,", "information needed to finalize array test measure consumer # metrics consumer_metrics_dist = [{", "\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array( [5,", "\"residential\": { \"2009\": -400, \"2010\": -400}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon", "None}}}, { \"stock cost\": { \"residential\": { \"2009\": 100, \"2010\": 100}, \"commercial\": {", "not of identical size, # zip_longest() will use the fill value created below", "== type(i2)) for x in range(0, len(i)): self.assertAlmostEqual(i[x], i2[x], places=2) # At the", "savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": {\"2009\": -0.01602415, \"2010\": -0.01111353}, \"cce", "Supply-side subset of 'measures_all_dist'. 
measures_overlap1_dist (dict): List of supply-side Measure objects and associated", "measure attributes are correctly initiated. Attributes: sample_measure (object): Residential sample measure object. attribute_dict", "is not None: self.assertAlmostEqual(function_output[ind], x, places=2) else: self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase): \"\"\"Test the", "{ \"baseline\": {\"2009\": 8.886499, \"2010\": 8.886499}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "[test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme, \"uncompeted\") # For first test case, verify correct adoption/competition scenario", "\"2010\": numpy.array([8, 9, 9.1])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": numpy.array([8.0, 7.5,", "numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091)]), \"2010\":", "the 'payback' function. Verify cashflow input generates expected payback output. 
Attributes: handyvars (object):", "run.UsefulInputFiles()) # Reset aeo_years cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com =", "\"2010\": numpy.array([194, 205, 219, 289, 176])}, \"savings (annual)\": { \"2009\": numpy.array([94, 93, 99,", "test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2 # Create Engine instance using", "measure r2\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None},", "0}}, \"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}}} cls.compete_meas4 =", "numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([ 2.7285e-08, 1.9795e-08, -2.023954e-08, -2.715319e-08, -5.525120e-08])},", "focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2", "{\"2009\": 50, \"2010\": 100}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10,", "95, \"rate 4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115},", "test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_point'), the focus", "\"2009\": 10, \"2010\": numpy.array([16, 15, 13])}, \"efficient\": { \"2009\": 20, \"2010\": numpy.array([8, 9,", "\"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.808018),", "input cashflows yield correct output payback values for idx, cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf),", "\"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": 
\"supply\", \"secondary\": None}, \"technology\":", "that should be generated given valid sample inputs. ok_out_array (list): Other financial metric", "metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics_final[ind] cls.measures_all_dist = [run.Measure(cls.handyvars, **x)", "-0.75}}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\": 2.44}, \"irr (w/ energy", "{ \"key 1\": { \"nested key 1\": [1, 2, 3, 4, 5], \"nested", "-1.111353e-08}, \"ccc (w/ energy cost benefits)\": { \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\":", "{\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "{\"2009\": 5, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10,", "0.09333333, 0.1222222])}}] cls.ok_out_dist3 = [{ \"savings and portfolio metrics\": { \"Technical potential\": {", "{ \"uncompeted\": True, \"competed\": True}, \"Max adoption potential\": { \"uncompeted\": False, \"competed\": True}},", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 50, \"rate 2\": 60,", "enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist = [run.Measure( cls.handyvars, **x) for x in [", "= run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure4 cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_base_life = 3", "\"stock\": { \"total\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\":", "{ \"residential\": { \"2009\": -50, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}},", "costs)\": {\"2009\": numpy.array([ 0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.1700000, 0.1233333, 0.2233333,", "Engine including one sample residential measure. 
ok_cashflows (list): Set of sample input cash", "{ \"2009\": numpy.array([ 42.22366, 42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}}, \"competed\": {", "numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}}, \"competed\": { \"baseline\": {", "sample Measure objects with point value inputs. measures_demand (list): Demand-side subset of 'measures_all'.", "energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {", "1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist2 = { \"stock\": { \"total\": { \"all\":", "across the class. test_adopt_scheme (string): Sample consumer adoption scheme. overlap_key (string): First sample", "[{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 2.23,", "Sample measure energy cost savings. ok_csave (int): Sample measure avoided carbon emissions. ok_ccostsave", "\"markets\": { \"Technical potential\": { \"key 1\": { \"nested key 1\": [1, 2,", "169, 194, 149]), \"2010\": numpy.array([194, 205, 219, 289, 176])}, \"savings (annual)\": { \"2009\":", "all 'measures_all_dist' objects. 
measure_master_msegs_out (dict): Master market microsegments that should be generated for", "\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\":", "20.11])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": numpy.array([11.11, 11.34,", "{ \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {", "{\"2009\": 11.5, \"2010\": 11}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist", "**sample_measure)] cls.ok_base_life = 3 cls.ok_product_lifetime = 6.2 cls.ok_life_ratio = 2 cls.ok_base_scost = 1", "self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps across the supply and", "values instead of point values. compete_meas2 (dict): Sample residential demand-side cooling measure 2.", "{}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3_dist = { \"name\": \"sample compete measure r3 dist\",", "across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.retro_rate =", "\"2010\": 6}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "and values\": { cls.adjust_key1: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "\"energy\": { \"savings (total)\": { \"2009\": numpy.array([184, 173, 169, 194, 149]), \"2010\": numpy.array([194,", "ok_out_point_res (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that should", "\"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\":", "-0.05267604, -0.05230731, -0.07946463]), \"2010\": numpy.array([ -0.047715000, -0.05520500, -0.09523954, -0.10215319, -0.13025120])}, \"ccc\": { \"2009\":", "= 'Max 
adoption potential' cls.ok_rate = 0.07 cls.ok_master_mseg_point = { \"stock\": { \"total\":", "}}}, \"irr (w/ energy costs)\": { \"2009\": numpy.array([ 3.648926, 3.737086, 3.956335, 3.180956, 2.886001]),", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 16}, \"efficient\": {\"2009\": 20, \"2010\":", "[ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5] cls.supply_demand_adjust1_dist", "of 'measures_all_dist' with secondary microsegments to adjust. a_run_dist (object): Analysis engine object incorporating", "portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist2' with a residential sample", "Test for correct data types in measure markets attribute for adopt_scheme in self.handyvars.adopt_schemes:", "values that are found in i and i2, # respectively, at the current", "289, 176])}, \"savings (annual)\": { \"2009\": numpy.array([94, 93, 99, 84, 99]), \"2010\": numpy.array([114,", "\"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None},", "run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize point value test measure #", "{ \"baseline\": { \"2009\": numpy.array([ 16.04455, 17.29736, 10.29000]), \"2010\": numpy.array([ 16.04455, 17.29736, 10.29000])},", "associated with these primary market microsegments. 
Attributes: handyvars (object): Useful variables across the", "\"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}, { \"rate 1\": 205,", "\"baseline\": { \"2009\": numpy.array([ 17.77300, 10.22977, 19.98073]), \"2010\": numpy.array([ 17.77300, 10.22977, 19.98073])}, \"efficient\":", "self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme, self.test_htcl_adj) # Run the measure competition routine on sample supply-side", "\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": numpy.array([ 0, 1, 2])}}},", "15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\":", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 69, \"2010\": 66}, \"efficient\": {\"2009\": 46, \"2010\":", "{\"2009\": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]), \"2010\": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])}, \"payback", "\"nested key 2\": 5}, \"key 2\": 10.8}, \"Max adoption potential\": { \"key 1\":", "\"2010\": numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "{ \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\":", "being tested. compete_meas1 (dict): Sample residential demand-side cooling measure 1. compete_meas1_dist (dict): Alternative", "operation of the 'metrics_update' function. 
Verify that cashflow inputs generate expected prioritization metric", "\"measure\": { \"2009\": numpy.array([16.04, 17.30, 10.29]), \"2010\": numpy.array([16.04, 17.30, 10.29])}}, \"competed\": { \"all\":", "None}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\":", "metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist1[3])", "(dict): Sample commercial supply-side lighting measure 3. compete_meas_dist (dict): Alternative version of sample", "= CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max adoption potential' cls.ok_rate = 0.07", "\"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "\"2009\": numpy.array([94, 93, 99, 84, 99]), \"2010\": numpy.array([114, 105, 89, 145, 96])}, \"cost", "{ \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.670251, \"2010\": 1.670251}},", "20.10668])}, \"efficient\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}},", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.89, 5.11, 9.99]),", "32])}}, \"competed\": { \"baseline\": { \"2009\": 25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": {", "potential\": { \"uncompeted\": False, \"competed\": True}}, \"consumer metrics\": False}, { \"stock\": { \"cost", "'ok_master_mseg_point' with a residential sample measure. 
ok_out_point_com (dict): Measure attribute update status, savings,", "sides of # heating and cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj) # Check updated", "{ yr: 10 for yr in cls.handyvars.aeo_years}, \"total affected\": { yr: 5 for", "or carbon market/savings value. Attributes: a_run (object): Sample analysis engine object. ok_total (dict):", "\"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1,", "{}}}} cls.compete_meas5 = { \"name\": \"sample compete measure r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single", "\"total\": { \"baseline\": {\"2009\": 10, \"2010\": 16}, \"efficient\": {\"2009\": 20, \"2010\": 8}}, \"competed\":", "22.22366, 22.68455, 20.10668])}, \"efficient\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183,", "\"2010\": 10}, \"measure\": {\"2009\": 8.89, \"2010\": 8.89}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\":", "{}, \"adjusted energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\":", "energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure3 = { \"name\": \"sample", "\"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys and values\": {}, \"competed choice parameters\":", "\"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\":", "{\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 40, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\":", "'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing')) cls.test_htcl_adj = {", "\"baseline\": {\"2009\": 17.77300, \"2010\": 17.77300}, \"efficient\": {\"2009\": 8.886499, \"2010\": 8.886499}}, \"competed\": { \"baseline\":", "-6.7, -4.2, -5.5])}, \"cost savings (annual)\": { \"2009\": 
numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),", "\"sample measure 5 (commercial)\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\":", "(CFL)\"], \"secondary\": None}, \"markets\": { \"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing", "metrics consumer_metrics_final_dist = [{ \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95},", "\"2010\": 0.5567503}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001},", "= [cls.measures_all_dist[1]] cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to finalize array", "{ \"2009\": numpy.array([94, 93, 99, 84, 99]), \"2010\": numpy.array([114, 105, 89, 145, 96])},", "sample Measure objects with array inputs. measures_demand_dist (list): Demand-side subset of 'measures_all_dist'. measures_supply_dist", "{\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 11.5, \"2010\": 11}}}, \"energy\": { \"total\": {", "{ \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [5, 6,", "{ \"name\": \"sample compete measure r3 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"],", "'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))],", "0.7009346), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\":", "attribute for adopt_scheme in self.handyvars.adopt_schemes: for comp_scheme in [\"uncompeted\", \"competed\"]: tested_data = \\", "using test measure, run function on it engine_instance = run.Engine(self.handyvars, [test_meas]) engine_instance.calc_savings_metrics( self.test_adopt_scheme,", "values. measures_all (list): List of all competing measures with point value inputs. 
measures_secondary", "0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": {", "benefits)\": { \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\":", "{ \"total\": { \"all\": { \"2009\": 30, \"2010\": 30}, \"measure\": { \"2009\": 23,", "(grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2_dist = { \"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1',", "# Run measure competition routine on sample measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme) #", "\"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5,", "{\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "2, 0.72), \"rate 5\": numpy.pmt(0.15, 2, 0.8128544), \"rate 6\": numpy.pmt(0.065, 2, 0.9103132), \"rate", "that at the current location in the dict structure, # the keys are", "objects and associated contributing microsegment keys that overlap with 'measures_demand' Measure objects. 
measures_overlap2", "\"2009\": numpy.array([ 16.04455, 17.29736, 10.29000]), \"2010\": numpy.array([ 16.04455, 17.29736, 10.29000])}, \"efficient\": { \"2009\":", "some array inputs.\"\"\" # Run measure competition routine on sample measures self.a_run_dist.compete_com_primary( self.measures_all_dist,", "correct anpv, irr, payback, and # cost of conserved energy/carbon outputs for ind,", "2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.2009346),", "69, \"2010\": numpy.array([66, 66, 63])}, \"efficient\": { \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}},", "\"Commercial\": { \"Heating\": {\"2009\": .20, \"2010\": .20}, \"Cooling\": {\"2009\": .25, \"2010\": .25}}}, \"AIA", "-390, \"rate 6\": -150, \"rate 7\": -400}, \"2010\": { \"rate 1\": -350, \"rate", "10, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10,", "test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3 # Create Engine", "cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.test_adopt_scheme = \"Max adoption potential\" cls.overlap_key = str( ('primary',", "{ \"rate 1\": numpy.pmt(10.0, 2, -0.4318182), \"rate 2\": numpy.pmt(1.0, 2, -0.125), \"rate 3\":", "\"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\":", "1\": 50, \"rate 2\": 60, \"rate 3\": 70, \"rate 4\": 80, \"rate 5\":", "= [run.Measure(cls.handyvars, **x) for x in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist", "run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1 # Create Engine instance 
using test measure,", "2.079221)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.798978), numpy.pmt(0.07, 2, 1.925539), numpy.pmt(0.07, 2, 1.654337), numpy.pmt(0.07,", "24}}, \"competed\": { \"baseline\": {\"2009\": 25.5, \"2010\": 18}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}},", "value created below as a # substitute in the dict that has missing", "1.670251}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}},", "entry', 5.2) # In this structure, k and k2 are the keys that", "{ \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 10, \"2010\": 5}}}, \"carbon\": {", "\"efficient\": {\"2009\": 0, \"2010\": 18}}, \"competed\": { \"baseline\": {\"2009\": 0, \"2010\": 12}, \"efficient\":", "{}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\":", "(dict): Sample residential supply-side cooling measure 3. measures_all (list): List of all competing/interacting", "1}}, \"competed choice parameters\": { cls.adjust_key1: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\":", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 8}}}, \"energy\": { \"total\":", "0.2200000]), \"2010\": numpy.array([ 0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"] def", "\"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\": 16}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\":", "(total)\": { \"2009\": numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]), \"2010\": numpy.array([199.4, 191.3, 194.9, 195.0,", "0.006743571])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.227001, 9.770226,", "0, \"2010\": numpy.array([18, 15, 9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}},", "= consumer_metrics[ind] cls.measures_all_dist = [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), 
cls.compete_meas2_dist,", "given 'ok_master_mseg_dist3' with a residential sample measure. ok_out_dist4 (dict): Measure attribute update status,", "the recursive # exploration of dict1 and dict2, respectively for (k, i), (k2,", "Sample measure master microsegment including measure lifetime array. ok_master_mseg_dist4 (dict): Sample measure master", "\"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07, 2, 1.139051), numpy.pmt(0.07, 2,", "First sample string for competed primary market microsegment key chain being tested. overlap_key_scnd", "{ \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"cost\": { \"stock\": { \"total\": {", "\"competed\": { \"baseline\": {\"2009\": 34.5, \"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"cost\":", "numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "\"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_master_mseg_dist4 = { \"stock\": { \"total\": {", "str( ('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)', 'cooling', 'demand', 'lighting gain', 'existing')) cls.secnd_adj_key =", "10.05334])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183,", "50, \"rate 2\": 60, \"rate 3\": 70, \"rate 4\": 80, \"rate 5\": 90,", "cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.compete_meas2 = { \"name\": \"sample", "\"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}}, \"competed\":", "1.73179114, \"2010\": 1.73179114}}, \"competed\": { \"baseline\": {\"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": {\"2009\": 0.432947785,", "\"measure\": 2}} cls.ok_master_mseg_dist2 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "\"2010\": 11}}}, \"carbon\": { 
\"total\": { \"baseline\": {\"2009\": 69, \"2010\": 66}, \"efficient\": {\"2009\":", "17.5])}}}, { \"cce\": { \"2009\": numpy.array([ -0.01306317, -0.01389378, -0.01422262, -0.01238981, -0.01613170]), \"2010\": numpy.array([", "numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\":", "{ \"baseline\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])},", "\"\"\"Test the operation of the 'metrics_update' function. Verify that cashflow inputs generate expected", "-9.682543e-08, -7.964446e-08, -8.216772e-08, -7.592937e-08])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\":", "measure avoided carbon costs. ok_out_dicts (list): Output annuity equivalent Net Present Value dicts", "in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist = cls.measures_all_dist[2:5]", "{\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}}}, \"energy\":", "0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088,", "7])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\":", "cls.a_run = run.Engine(cls.handyvars, cls.measures_all) # Set information needed to finalize point value test", "\"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}}, \"carbon\":", "\"2010\": 10}, \"measure\": {\"2009\": 2.23, \"2010\": 2.23}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\":", "{ \"baseline\": {\"2009\": 63.33550, \"2010\": 63.33550}, \"efficient\": {\"2009\": 42.22366, \"2010\": 42.22366}}, \"competed\": {", "input 
cash flows. ok_out (list): Outputs that should be generated for each set", "update status, savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_point'", "\"total\": { \"baseline\": { \"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": 20, \"2010\":", "-8.564064e-08, -1.127980e-07]), \"2010\": numpy.array([ -4.771500e-08, -5.520500e-08, -9.523954e-08, -1.021532e-07, -1.302512e-07])}}, { \"anpv\": { \"stock", "1, -0.5), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\": {", "{ \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24, 20, 12])}, \"efficient\": {", "{\"2009\": 1.73, \"2010\": 1.73}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\":", "energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}, \"supply-demand adjustment\": {", "= run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4 # Create Engine instance using test", "0.5567503, \"2010\": 0.5567503}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "demand-side and supply-side market microsegment key chain being tested. 
compete_meas1 (dict): Sample residential", "30}, \"measure\": {\"2009\": 22.22, \"2010\": 22.22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15},", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 8}}}, \"energy\": {", "95, \"2010\": 95}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\":", "\"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys and values\":", "\"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\":", "\"2010\": 40}, \"efficient\": {\"2009\": 25, \"2010\": 25}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\":", "-0.006204243, -0.09331291]), \"2010\": numpy.array([ -0.1140346, -0.11474490, -0.09371098, -0.072742925, -0.11206083])}, \"ccc\": { \"2009\": numpy.array([", "6.511136, 6.824341, 5.072499])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "value test measure # consumer metrics consumer_metrics_final = [{ \"stock cost\": { \"residential\":", "metric outputs. Attributes: handyvars (object): Useful variables across the class. measure_list (list): List", "self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Portfolio metrics self.assertEqual(list(sorted(engine_instance.measures[ 0].portfolio_metrics[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Verify test measure", "\"2009\": numpy.array( [25.1, 24.7, 23.7, 31.2, 18.5]), \"2010\": numpy.array( [20.1, 18.7, 21.7, 21.2,", "\"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\",", "(string): Sample consumer adoption scheme. ok_rate (float): Sample discount rate. 
ok_master_mseg_point (dict): Sample", "None}}, \"carbon cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 0.9040091), \"2010\": numpy.pmt(0.07, 2,", "{ \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}}, \"carbon\": {", "has missing content; this # value is given as a tuple to be", "\"2010\": numpy.array([ -0.01145724, -0.01084246, -0.01014934, -0.007691022, -0.01262901])}, \"cce (w/ carbon cost benefits)\": {", "ok_life_ratio (int): Sample measure->baseline lifetime ratio. ok_base_scost (int): Sample baseline stock cost. ok_scostsave", "42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "adjusts any secondary markets associated with these primary market microsegments. Attributes: handyvars (object):", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.89, 5.11, 9.99]), \"2010\":", "{\"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6,", "\"baseline\": {\"2009\": 8.886499, \"2010\": 8.886499}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\":", "\"Heating\": {\"2009\": .30, \"2010\": .30}, \"Cooling\": {\"2009\": .35, \"2010\": .35}}, \"Commercial\": { \"Heating\":", "5\": 105, \"rate 6\": 110, \"rate 7\": 115}, { \"rate 1\": 205, \"rate", "key 1\"], tested_data[\"key 1\"][\"nested key 2\"], tested_data[\"key 2\"]], [numpy.ndarray, int, float])])) # Offer", "\"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 0, \"2010\":", "cls.adjust_key2: { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "\"measure\": {\"2009\": 23, \"2010\": 22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\":", "variables across the class. test_adopt_scheme (string): Sample consumer adoption scheme. 
overlap_key (string): First", "# Import needed packages import unittest import numpy import copy import itertools import", "-1.614253e-08]), \"2010\": numpy.array([ -1.114697e-08, -1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc (w/ energy cost benefits)\":", "that measure master microsegment inputs yield expected savings and financial metrics outputs. Attributes:", "1.113501, \"2010\": 1.113501}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0,", "measure. ok_out_dist1 (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that", "0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571, 0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785,", "potential\" cls.adjust_key1 = str( ('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand',", "{ \"stock\": { \"total\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])},", "self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the 'convert_to_numpy' function.", "'measures_all'. measures_supply (list): Supply-side subset of 'measures_all'. 
measures_overlap1 (dict): List of supply-side Measure", "\"cce\": { \"2009\": numpy.array([ -0.01565543, -0.02450490, -0.01934271, -0.01897398, -0.01418052]), \"2010\": numpy.array([ -0.02466428, -0.02853592,", "{ \"2009\": numpy.array([0.87, 0.01, 4.80]), \"2010\": numpy.array([0.87, 0.01, 4.80])}}}, \"energy\": { \"total\": {", "numpy.array([ numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346),", "= self.ok_master_mseg_dist3 # Create Engine instance using test measure, run function on it", "{\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] def test_compete_res(self): \"\"\"Test outcomes given valid", "\"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 100, \"rate 2\": 110, \"rate", "\"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy", "leaf nodes. ok_master_mseg_dist1 (dict): Sample measure master microsegment including energy, carbon, and energy/carbon", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\":", "the supply and demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_supply_dist, self.test_adopt_scheme,", "\"rate distribution\": { \"2009\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4], \"2010\":", "measure 1. compete_meas2 (dict): Sample commercial supply-side lighting measure 2. 
compete_meas3 (dict): Sample", "3\": numpy.pmt(0.45, 2, 0.8739596), \"rate 4\": numpy.pmt(0.25, 2, 1.08), \"rate 5\": numpy.pmt(0.15, 2,", "\"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\":", "\"2010\": 5}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\":", "\"measure\": { \"2009\": 0, \"2010\": numpy.array([16, 15, 13])}}, \"competed\": { \"all\": {\"2009\": 10,", "{ \"rate distribution\": { \"2009\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],", "12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}, \"competed\": {", "as a # substitute in the dict that has missing content; this #", "16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\":", "associated contributing microsegment keys that overlap with 'measures_demand' Measure objects. measure_master_msegs_out (dict): Master", "{\"2009\": .15, \"2010\": .15}}, \"Commercial\": { \"Heating\": {\"2009\": .20, \"2010\": .20}, \"Cooling\": {\"2009\":", "object attributes to keys from input dict.\"\"\" for key in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key],", "functions. 
Verify that 'compete_com_primary' correctly calculates primary market shares and updates master microsegments", "\"2010\": -0.08611353}, \"ccc\": {\"2009\": -1.602415e-08, \"2010\": -1.111353e-08}, \"ccc (w/ energy cost benefits)\": {", "\"2010\": 1.73179114}}, \"competed\": { \"baseline\": {\"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": {\"2009\": 0.432947785, \"2010\":", "\"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\":", "energy costs)\": { \"2009\": 0.25, \"2010\": 0.33}, \"payback (w/ energy and carbon costs)\":", "10.29000])}, \"efficient\": { \"2009\": numpy.array([ 8.022273, 8.648681, 5.144998]), \"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}},", "4\": -380, \"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400}, \"2010\": {", "\"2009\": 4.54, \"2010\": 4.09}, \"payback (w/ energy costs)\": { \"2009\": 0.25, \"2010\": 0.33},", "measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x, y) for x, y in zip([ tested_data[\"key 1\"][\"nested key 1\"],", "given 'ok_master_mseg_dist4' with a residential sample measure. 
\"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables", "= { \"name\": \"sample compete measure r3 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family", "suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point # Create Engine instance", "(total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"original energy (competed and captured)\":", "{ \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341}, \"efficient\": {\"2009\": 6.511136, \"2010\": 6.511136}}}}, \"lifetime\": {\"baseline\":", "\"competed\": True}, \"Max adoption potential\": { \"uncompeted\": False, \"competed\": True}}, \"consumer metrics\": False},", "self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key,", "{\"2009\": .10, \"2010\": .10}, \"Cooling\": {\"2009\": .15, \"2010\": .15}}, \"Commercial\": { \"Heating\": {\"2009\":", "{\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\":", "6, 7])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\":", "numpy.array([ 21.11183, 21.34227, 20.05334]), \"2010\": numpy.array([ 21.11183, 21.34227, 20.05334])}, \"efficient\": { \"2009\": numpy.array([", "test_adopt_scheme (string): Sample consumer adoption scheme. overlap_key (string): First sample string for competed", "40.10668])}, \"efficient\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}},", "Value dicts that should be generated given valid sample inputs. 
ok_out_array (list): Other", "\"mseg_out_break\": {}}}} cls.measures_all = [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2,", "the current location in the dict structure, # the keys are equal; this", "input.\"\"\" # Instantiate measure measure_instance = run.Measure(self.handyvars, **self.sample_measure) # Test for correct data", "x, y in zip([ tested_data[\"key 1\"][\"nested key 1\"], tested_data[\"key 1\"][\"nested key 2\"], tested_data[\"key", "\"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([-150,", "-1.021532e-07, -1.302512e-07])}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07,", "numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {", "output. Attributes: handyvars (object): Useful variables across the class. measure_list (list): List for", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.04958678), \"rate", "{ \"baseline\": { \"2009\": 0, \"2010\": numpy.array([18, 15, 9])}, \"efficient\": { \"2009\": 0,", "}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": {", "-145, \"rate 4\": -150, \"rate 5\": -155, \"rate 6\": -160, \"rate 7\": -170},", "{\"2009\": 23, \"2010\": 22}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\":", "\"efficient\": {\"2009\": 30, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\":", "200, \"2010\": 300}, \"efficient\": {\"2009\": 50, \"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\": 100,", "20, \"2010\": 20}, \"measure\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10,", "{ \"original energy (total captured)\": {}, \"original energy (competed and captured)\": {}, \"adjusted", "sample 'uncompeted' # market ('ok_master_mseg_dist1'), the focus of 
this test suite test_meas =", "(w/ carbon cost benefits)\": { \"2009\": -0.04935749, \"2010\": -0.08611353}, \"ccc\": {\"2009\": -1.602415e-08, \"2010\":", "for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test", "\"efficient\": { \"2009\": 0.865895571, \"2010\": 0.865895571}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571},", "\"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50, 51.1]),", "dicts or unitary values that are found in i and i2, # respectively,", "2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.356014),", "1.113501, 4.885113, 0.009633673])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\":", "7, 8], [-10, 14, 2, 3, 4], [-10, 0, 1, 2], [10, 4,", "primary market shares and updates master microsegments for a series of competing residential", "{ \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 45, \"2010\": 45}},", "numpy.array([ 0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test", "numpy.array( [15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {", "numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([10.9, 11.3, 12.3,", "\"2010\": numpy.array([ 0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])}, \"payback (w/ energy and carbon costs)\":", "1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key2:", "-1.136109e-07]), \"2010\": numpy.array([ 
-2.15e-08, -2.15e-08, -8.611353e-08, -8.611353e-08, -1.247637e-07])}}, { \"anpv\": { \"stock cost\":", "0.5567503}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 3.340502, \"2010\": 3.340502}, \"efficient\": {\"2009\": 2.227001,", "updated competed master microsegments for each sample measure # following competition/supply-demand overlap adjustments", "numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None,", "60, \"2010\": 60}, \"efficient\": {\"2009\": 45, \"2010\": 45}}, \"competed\": { \"baseline\": {\"2009\": 30,", "\"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 17,", "second item is the value; # in the case where the dicts are", "{ \"2009\": 0, \"2010\": numpy.array([36, 30, 18])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([24,", "for comp_scheme in [\"uncompeted\", \"competed\"]: tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x, y) for", "numpy.ndarray): self.assertTrue(type(i) == type(i2)) for x in range(0, len(i)): self.assertAlmostEqual(i[x], i2[x], places=2) #", "\"2010\": 63.33550}, \"efficient\": {\"2009\": 42.22366, \"2010\": 42.22366}}, \"competed\": { \"baseline\": {\"2009\": 31.66775, \"2010\":", "\"Technical potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg keys and values\": {},", "function_output = engine_instance.metric_update( self.measure_list[0], self.ok_base_life, int(self.ok_product_lifetime), self.ok_base_scost, self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave, self.ok_csave, self.ok_ccostsave) #", "and carbon costs)\": {\"2009\": numpy.array([ 1.941176, 4.555556, 5.647891, 5.501689, 4.543007]), \"2010\": numpy.array([ 4.882353,", ".40, \"2010\": .40}, \"Cooling\": {\"2009\": .45, \"2010\": .45}}}} cls.ok_out = { \"AIA CZ1\":", "energy cost benefits)\": { \"2009\": numpy.array([ -8.232209e-08, -9.117156e-08, 
-8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\": numpy.array([", "arrays. ok_master_mseg_dist2 (dict): Sample measure master microsegment including stock cost array. ok_master_mseg_dist3 (dict):", "self.assertEqual(k, k2) # If the recursion has not yet reached the terminal/leaf node", "= { \"supply\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": { yr:", "{ \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 1.73, \"2010\": 1.73}},", "measures_overlap2_dist (dict): List of demand-side Measure objects and associated contributing microsegment keys that", "2.400830388])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.59768671, 0.02713253, 14.40498233]), \"2010\":", "self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist4[3]) class", "\"2010\": numpy.array([0, 2, 4])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40},", "100, \"2010\": 100}, \"cost savings (total)\": {\"2009\": 10, \"2010\": 15}, \"cost savings (annual)\":", "[15, 16, 17])}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": {", "numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}, \"efficient\": { \"2009\": numpy.array([0,", "\"competed\"][\"master_mseg\"]) class ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary' and 'secondary_adj' functions. Verify that 'compete_com_primary' correctly", "\"2009\": None, \"2010\": None}}}] # Adjust/finalize point value test measure consumer metrics for", "\"total\": { \"baseline\": {\"2009\": 16.04455, \"2010\": 16.04455}, \"efficient\": {\"2009\": 8.022273, \"2010\": 8.022273}}, \"competed\":", "for Engine including one sample residential measure. 
ok_num_units (int): Sample number of competed", "(w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.232209e-08, -9.117156e-08, -8.600937e-08, -8.564064e-08, -8.084718e-08]), \"2010\":", "\"efficient\": {\"2009\": 30, \"2010\": 30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\":", "\"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "consumer_metrics[ind] cls.measures_all_dist = [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]]", "\"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector (LED)\"], \"technology_type\": {", "\"efficient\": {\"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\":", "[\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None},", "a series of competing commercial measures; and that 'secondary_adj' correctly adjusts any secondary", "{}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.compete_meas5 = { \"name\":", "mseg keys and values\": {}, \"competed choice parameters\": {}, \"secondary mseg adjustments\": {", "generated given valid sample inputs. ok_out_array (list): Other financial metric values that should", "measure_master_msegs_out_dist (dict): Master market microsegments that should be generated for each Measure object", "\"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 10, \"2010\": 10}}, \"competed\":", "c3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": { \"primary\": [\"lighting\"], \"secondary\": None}, \"technology\": [\"reflector", "financial metrics outputs. 
Attributes: handyvars (object): Useful variables across the class. sample_measure_res (object):", "= run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist1 # Create Engine instance using test", "numpy.pmt(0.15, 2, 0.3695652), \"rate 6\": numpy.pmt(0.065, 2, 0.4389671), \"rate 7\": -0.25}, \"2010\": {", "{\"2009\": 15, \"2010\": 15}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\":", "3.737086, 3.956335, 3.180956, 2.886001]), \"2010\": numpy.array([ 2.425032, 2.584709, 2.240438, 2.298386, 2.147181])}, \"irr (w/", "family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run = run.Engine(cls.handyvars, cls.measures_all) #", "\"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}, \"efficient\":", "5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 3.370236, 6.877566, 4.335205,", "\"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\": 11.5, \"2010\": numpy.array([11, 11,", "\"2010\": numpy.array([22, 22, 21])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {", "numpy.pmt(0.15, 2, 0.1521739), \"rate 6\": numpy.pmt(0.065, 2, 0.2042254), \"rate 7\": -0.125}}}, \"energy cost\":", "[ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]] # Instantiate engine object based on", "0, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "110, \"rate 7\": 115}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class 
ComCompeteTest(unittest.TestCase, CommonMethods): \"\"\"Test 'compete_com_primary' and 'secondary_adj' functions. Verify", "300}, \"efficient\": {\"2009\": 50, \"2010\": 100}}, \"competed\": { \"baseline\": {\"2009\": 100, \"2010\": 150},", "'assembly', 'electricity (grid)', 'cooling', 'demand', 'lighting gain', 'existing')) cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing'))", "10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "measure master microsegment including stock cost array. ok_master_mseg_dist3 (dict): Sample measure master microsegment", "1.73179114, 0.01808835, 9.60332155]), \"2010\": numpy.array([ 1.73179114, 0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 1.29884336,", "\"rate 6\": -230, \"rate 7\": -200}, \"2010\": { \"rate 1\": -190, \"rate 2\":", "with array inputs. measures_secondary_dist (list): Subset of 'measures_all_dist' with secondary microsegments to adjust.", "# Run secondary microsegment adjustments on sample measure self.a_run_dist.secondary_adj( self.measures_secondary_dist, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme)", "0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501,", "class. measure_list (list): List for Engine including one sample residential measure. ok_cashflows (list):", "key chain being tested. compete_meas1 (dict): Sample residential demand-side cooling measure 1. compete_meas1_dist", "1}}, { \"stock\": { \"total\": { \"all\": { \"2009\": 30, \"2010\": 30}, \"measure\":", "subset of 'measures_all'. 
measures_overlap1 (dict): List of supply-side Measure objects and associated contributing", "\"rate 2\": 110, \"rate 3\": 115, \"rate 4\": 120, \"rate 5\": 125, \"rate", "point in all # test files) def main(): \"\"\"Trigger default behavior of running", "\"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": { \"2009\": 30,", "{\"primary\": [\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\":", "'calc_savings_metrics' function. Verify that measure master microsegment inputs yield expected savings and financial", "measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[", "= { \"name\": \"sample compete measure c3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": {", "\"commercial\": { \"2009\": { \"rate 1\": 100, \"rate 2\": 110, \"rate 3\": 120,", "42.22366}}, \"competed\": { \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}},", "cooling measure 2. compete_meas3 (dict): Sample residential supply-side cooling measure 1. 
compete_meas3_dist (dict):", "\"total\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 5, \"2010\": 5}}, \"competed\":", "{ \"2009\": 3.45, \"2010\": 2.44}, \"irr (w/ energy and carbon costs)\": { \"2009\":", "\"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "numpy.array([6, 5, 3])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\":", "on sample measures self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on", "0.2857143]), \"2010\": numpy.array([ 0.3344482, 0.3194888, 0.3533569, 0.3472222, 0.3636364])}, \"payback (w/ energy and carbon", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.02, 8.65, 5.14]),", "'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family", "numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2,", "numpy.pmt(0.25, 2, 0.72), \"rate 5\": numpy.pmt(0.15, 2, 0.8128544), \"rate 6\": numpy.pmt(0.065, 2, 0.9103132),", "-2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}, \"cost savings (annual)\":", "variables across the class. 
sample_measure (object): Sample measure data with lists to convert.", "\"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -50,", "numpy.array([11.0, 11.0, 10.5])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([0, 0, 0])}}}, \"energy\": {", "self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[1]) # Verify", "\"2009\": 0, \"2010\": numpy.array([18, 15, 9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5,", "1\": 100, \"rate 2\": 110, \"rate 3\": 120, \"rate 4\": 130, \"rate 5\":", "be generated given 'ok_master_mseg_point' with a residential sample measure. ok_out_point_com (dict): Measure attribute", "\"2010\": numpy.array([ 8.022273, 8.648681, 5.144998])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.022273, 8.648681,", "\"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": { \"total\": {", "\"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "# Reset aeo_years cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5", "status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist2[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) #", "{\"2009\": 20, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "{\"2009\": 21.11183, \"2010\": 21.11183}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}, \"carbon\": { \"total\": {", "# cost of conserved energy/carbon outputs for ind, x in enumerate(self.ok_out_array): if x", 
"\"2010\": 15}, \"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}}, \"lifetime\":", "for each sample measure # following competition/secondary microsegment adjustments for ind, d in", "competed master microsegments for each sample measure # following competition/supply-demand overlap adjustments for", "keys that overlap with 'measures_demand' Measure objects. measures_overlap2 (dict): List of demand-side Measure", "(competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": { cls.adjust_key2: { \"2009\": 0,", "places=2) else: self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase): \"\"\"Test the operation of the 'payback' function.", "\"measure\": {\"2009\": 0, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\":", "-0.04976366])}, \"cce (w/ carbon cost benefits)\": { \"2009\": numpy.array([ 0.002333333, 0.002333333, -0.04935749, -0.04935749,", "(list): Other financial metric values that should be generated given valid sample inputs.", "11}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 46,", "# in the case where the dicts are not of identical size, #", "17])}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": numpy.array([5,", "\"2009\": numpy.array([ -0.01306317, -0.01389378, -0.01422262, -0.01238981, -0.01613170]), \"2010\": numpy.array([ -0.01145724, -0.01084246, -0.01014934, -0.007691022,", "10}, \"efficient\": { \"2009\": 0, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5,", "numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}, \"efficient\": { \"2009\": numpy.array([", "\"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 15, \"2010\": 15}}}, \"energy\": { \"total\":", "8.02, \"2010\": 8.02}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\":", 
"3\": -190, \"rate 4\": -205, \"rate 5\": -180, \"rate 6\": -230, \"rate 7\":", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5,", "numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15},", "10, \"2010\": 15}}, \"carbon\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\":", "0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794)]),", "{ \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 22.22, \"2010\": 22.22}},", "90])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\":", "{ \"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\":", "numpy.pmt(0.07, 2, 1.582016)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}},", "\"carbon\": { \"total\": { \"baseline\": {\"2009\": 39.06682, \"2010\": 39.06682}, \"efficient\": {\"2009\": 26.04455, \"2010\":", "{ \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 25}},", "1\"], tested_data[\"key 1\"][\"nested key 2\"], tested_data[\"key 2\"]], [numpy.ndarray, int, float])])) # Offer external", "= { \"name\": \"sample compete measure c1\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"], \"end_use\": {", "\"2010\": { \"rate 1\": -350, \"rate 2\": -60, \"rate 3\": -70, \"rate 4\":", "{ \"stock\": { \"total\": { \"baseline\": {\"2009\": 23, \"2010\": 22}, \"efficient\": {\"2009\": 11.5,", "(float): Sample discount rate. 
ok_master_mseg_point (dict): Sample measure master microsegment including all point", "-0.10215319, -0.13025120])}, \"ccc\": { \"2009\": numpy.array([ 3.6380e-08, 1.9260e-08, -1.934271e-08, -1.897398e-08, -4.613129e-08]), \"2010\": numpy.array([", "15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "-1.161895e-08, -1.140434e-08, -1.139849e-08, -1.146315e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -8.904701e-08,", "\"competed choice parameters\": { cls.overlap_key: { \"rate distribution\": { \"2009\": [ 0.1, 0.1,", "\"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}, \"carbon cost\": { \"residential\":", "{ \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}}, \"lifetime\": {\"baseline\":", "= [5.14, 0.71, 6.5, 0, 999] def test_cashflow_paybacks(self): \"\"\"Test for correct outputs given", "\"sample compete measure r4\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"],", "{ \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\": 25.5, \"2010\": 18}}, \"competed\": {", "w/ some array inputs.\"\"\" # Run measure competition routine on sample measures self.a_run_dist.compete_com_primary(", "69, \"2010\": 66}, \"efficient\": {\"2009\": 46, \"2010\": 44}}, \"competed\": { \"baseline\": {\"2009\": 34.5,", "1\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None, \"measure_type\": \"full", "at terminal leaf nodes. 
ok_master_mseg_dist1 (dict): Sample measure master microsegment including energy, carbon,", "85, \"rate 2\": 90, \"rate 3\": 95, \"rate 4\": 100, \"rate 5\": 105,", "\"rate 7\": -370}, \"2010\": { \"rate 1\": -435, \"rate 2\": -440, \"rate 3\":", "(total captured)\": {}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}, \"Max", "1.247533), numpy.pmt(0.07, 2, 1.130011)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)", "\"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]),", "scaling\": 1}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\": { \"2009\": [", "mseg keys and values\": { cls.overlap_key: { \"stock\": { \"total\": { \"all\": {\"2009\":", "\"2010\": 33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"cost\": { \"stock\": { \"total\": {", "\"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity", "21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1,", "6.612039, 5.452729])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 1.941176, 4.555556, 5.647891,", "family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home',", "16.3, 13.3, 13.8, 12.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8,", "None}}, \"carbon cost\": { \"residential\": { \"2009\": -150, \"2010\": -50}, \"commercial\": { \"2009\":", "= [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\":", "\"rate 1\": numpy.pmt(10.0, 2, -0.4090909), \"rate 2\": numpy.pmt(1.0, 2, 0), \"rate 3\": numpy.pmt(0.45,", "dictionary to be compared Raises: AssertionError: If dictionaries are not equal. 
\"\"\" #", "2, 1.699537), numpy.pmt(0.07, 2, 1.582016)]) }, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None,", "it a sample 'uncompeted' # market ('ok_master_mseg_point'), the focus of this test suite", "numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1,", "'uncompeted' # market ('ok_master_mseg_dist2'), the focus of this test suite test_meas = run.Measure(self.handyvars,", "competition routine on sample supply-side measures self.a_run.compete_res_primary( self.measures_supply, self.adjust_key2, self.test_adopt_scheme) # Remove any", "{ \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 10,", "\"total\": { \"baseline\": { \"2009\": numpy.array([ 39.06682, 40.94604, 30.43499]), \"2010\": numpy.array([ 39.06682, 40.94604,", "{ \"2009\": numpy.array([ -0.0396936, -0.04452961, -0.05150073, -0.006204243, -0.09331291]), \"2010\": numpy.array([ -0.1140346, -0.11474490, -0.09371098,", "0.22}}] cls.ok_out_point_com = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\":", "{\"2009\": 1.113501, \"2010\": 1.113501}, \"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"carbon\": { \"total\": {", "\"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\":", "\"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 16}, \"efficient\": {\"2009\":", "'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing')) cls.test_htcl_adj = { \"supply\": { \"['AIA_CZ1', 'single", "{ \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -90, \"rate", "each Measure object in 'measures_all_dist' following competition and supply-demand overlap adjustments. 
\"\"\" @classmethod", "numpy.array([ 3.340502, 14.65534, 0.02890102]), \"2010\": numpy.array([ 3.340502, 14.65534, 0.02890102])}, \"efficient\": { \"2009\": numpy.array([", "\"rate 2\": numpy.pmt(1.0, 2, 0.5625), \"rate 3\": numpy.pmt(0.45, 2, 0.8739596), \"rate 4\": numpy.pmt(0.25,", "numpy.array([0, 2, 4])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\":", "\"2010\": numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])}, \"savings (annual)\": { \"2009\": numpy.array([49.4, 42.3, 41.9,", "{ \"Residential\": { \"Heating\": {\"2009\": 30, \"2010\": 30}, \"Cooling\": {\"2009\": 35, \"2010\": 35}},", "11.5, \"2010\": 11}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\":", "{\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 8.89, \"2010\": 8.89}}}, \"energy\": { \"total\": {", "51, \"2010\": 36}, \"efficient\": {\"2009\": 34, \"2010\": 24}}, \"competed\": { \"baseline\": {\"2009\": 25.5,", "engine object. ok_total (dict): Sample unpartitioned measure results data. ok_partitions (dict): Sample results", "measures_all (list): List of all competing measures with point value inputs. measures_secondary (list):", "8.89, \"2010\": 8.89}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\":", "\"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300,", "x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary = [cls.measures_all[1]] # Instantiate engine object", "self.measures_supply, self.adjust_key2, self.test_adopt_scheme) # Remove any market overlaps across the supply and demand", "Tests for running the engine \"\"\" # Import code to be tested import", "dict_check(self, dict1, dict2): \"\"\"Check the equality of two dicts. 
Args: dict1 (dict): First", "\"irr (w/ energy costs)\": {\"2009\": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]), \"2010\": numpy.array([0.50, 0.50,", "\"2009\": 8.5, \"2010\": numpy.array([6, 6.5, 8])}}, \"competed\": { \"baseline\": { \"2009\": 8.5, \"2010\":", "\"2010\": numpy.array([12, 13, 16])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {", "{ \"baseline\": {\"2009\": 45, \"2010\": 45}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"cost\": {", "\"competed\": { \"baseline\": {\"2009\": 13.88650, \"2010\": 13.88650}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"carbon\":", "{\"2009\": 17.77, \"2010\": 17.77}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\":", "(list): Set of sample input cash flows. ok_out (list): Outputs that should be", "microsegment including stock cost and measure lifetime array. ok_out_point_res (dict): Measure attribute update", "a sample 'uncompeted' # market ('ok_master_mseg_dist4'), the focus of this test suite test_meas", "\"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))], [str(('primary',", "27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([", "= 6.2 cls.ok_life_ratio = 2 cls.ok_base_scost = 1 cls.ok_meas_sdelt = -1 cls.ok_esave =", "42.22366}}, \"competed\": { \"baseline\": {\"2009\": 31.66775, \"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}},", "and carbon costs)\": {\"2009\": numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]), \"2010\": numpy.array([0.33, 0.33, 0.22,", "for each sample measure # following competition/supply-demand overlap adjustments for ind, d in", "(object): Residential sample measure object. attribute_dict (dict): Dict of sample measure attributes. 
\"\"\"", "numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2,", "\"cce\": { \"2009\": numpy.array([ 0.036380, 0.019260, -0.01934271, -0.01897398, -0.04613129]), \"2010\": numpy.array([ 0.027285, 0.019795,", "adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_res_dist(self): \"\"\"Test", "{ \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None }, \"commercial\": {", "33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test output given residential measure with", "{ \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}},", "{\"2009\": 5, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20},", "\"total\": { \"baseline\": {\"2009\": 0, \"2010\": 36}, \"efficient\": {\"2009\": 0, \"2010\": 24}}, \"competed\":", "\"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\": { \"residential\":", "2\": numpy.pmt(1.0, 2, 0.375), \"rate 3\": numpy.pmt(0.45, 2, 0.5826397), \"rate 4\": numpy.pmt(0.25, 2,", "22.5])}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7,", "adoption potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\":", "metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test output given residential measure with array", "{\"2009\": 15, \"2010\": 15}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 90, \"2010\": 90},", "0.5567503, 2.931068, 
0.006743571])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}, \"efficient\": { \"2009\": numpy.array([ 10.55592, 10.67114,", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) #", "service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\":", "engine_instance = run.Engine(self.handyvars, self.measure_list) # Record the output for the test run of", "0.03566667, 0.03566667, -0.01602415, -0.01602415, -0.04694426]), \"2010\": numpy.array([ 0.05350000, 0.05350000, -0.01111353, -0.01111353, -0.04976366])}, \"cce", "correct data types in measure markets attribute for adopt_scheme in self.handyvars.adopt_schemes: for comp_scheme", "\"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}, \"Max adoption", "(grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\",", "family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) #", "'measures_all_dist'. 
measures_overlap1_dist (dict): List of supply-side Measure objects and associated contributing microsegment keys", "attribute update status, savings, and portfolio/consumer-level financial metrics that should be generated given", "[15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}}, \"energy\":", "\"2009\": numpy.array([ 0.036380, 0.019260, -0.01934271, -0.01897398, -0.04613129]), \"2010\": numpy.array([ 0.027285, 0.019795, -0.02023954, -0.02715319,", "captured)\": {}, \"adjusted energy (competed and captured)\": {}}}, \"supply-demand adjustment\": { \"savings\": {", "a tuple to be of comparable structure # to the normal output from", "\"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 15, \"2010\": 15}}}, \"carbon\":", "0, \"2010\": 12}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\":", "measure_instance.__dict__ def test_attributes(self): \"\"\"Compare object attributes to keys from input dict.\"\"\" for key", "{ \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.8859289), numpy.pmt(0.07, 2, 0.9582496), numpy.pmt(0.07, 2,", "2, 2.223862), numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07, 2, 1.356014)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.346974),", "{ \"stock cost\": { \"residential\": { \"2009\": numpy.array([95, 100, 90]), \"2010\": numpy.array([95, 100,", "{ \"2009\": 0, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5},", "\"rate 7\": 110}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\":", "the function converts terminal/leaf node lists in a dict to numpy arrays. 
Attributes:", "0.4389671), \"rate 7\": -0.25}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4318182), \"rate 2\":", "cls.ok_ccostsave = 1 cls.ok_out_array = [ numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07,", "\"2010\": 6}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 17, \"2010\": 12},", "\"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [0, 1, 2])}}}, \"energy\": {", "microsegment including energy, carbon, and energy/carbon cost arrays. ok_master_mseg_dist2 (dict): Sample measure master", "[\"electricity (grid)\"], \"secondary\": [\"electricity (grid)\"]}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": [\"lighting\"]},", "7\": -1}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate 2\": numpy.pmt(1.0, 2,", "overlap with 'measures_demand_dist' Measure objects. measures_overlap2_dist (dict): List of demand-side Measure objects and", "test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify test measure consumer-level", "5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {", "\"efficient\": {\"2009\": 0.5567503, \"2010\": 0.5567503}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 3.340502, \"2010\":", "\"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.overlap_key: { \"rate distribution\":", "42.22366, 42.68455, 40.10668])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\":", "should be generated for each set of sample cash flows. \"\"\" @classmethod def", "{ \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\":", "link primary and secondary market microsegments (by climate, building type, structure type). 
compete_meas1", "updated competed master microsegments for each sample measure # following competition/secondary microsegment adjustments", "18.3, 18.8, 17.5])}}}, { \"cce\": { \"2009\": numpy.array([ -0.01306317, -0.01389378, -0.01422262, -0.01238981, -0.01613170]),", "4.601286, 4.897553, 4.260683, 4.367373, 4.089454])}, \"payback (w/ energy costs)\": { \"2009\": numpy.array([ 0.2392344,", "numpy.pmt(0.07, 2, 0.9040091), numpy.pmt(0.07, 5, 2.050099)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1,", "10}, \"efficient\": { \"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {", "16])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 8.5, \"2010\":", "\"Max adoption potential\": { \"uncompeted\": False, \"competed\": True}}, \"consumer metrics\": False}, { \"stock\":", "\"efficient\": { \"2009\": 0.432947785, \"2010\": 0.432947785}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "[20.1, 18.7, 21.7, 21.2, 22.5])}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\":", "0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "objects and associated contributing microsegment keys that overlap with 'measures_demand' Measure objects. measure_master_msegs_out", "stock cost input values instead of point values. 
measures_all (list): List of all", "# Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[1]) # Verify test measure portfolio-level", "cls.sample_measure = { \"market_entry_year\": None, \"market_exit_year\": None, \"markets\": { \"Technical potential\": { \"key", "\"competed\": { \"baseline\": { \"2009\": 25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\":", "x in enumerate(self.ok_out_array): if x is not None: self.assertAlmostEqual(function_output[ind], x, places=2) else: self.assertEqual(function_output[ind],", "2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.113501,", "measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist4[2]) # Verify test measure consumer-level metrics", "on sample demand-side measures self.a_run.compete_res_primary( self.measures_demand, self.adjust_key1, self.test_adopt_scheme) # Remove any market overlaps", "1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.adjust_key1: { \"b1\":", "cost input values instead of point values. compete_meas2 (dict): Sample residential demand-side cooling", "incorporating all 'measures_primary_dist' objects. 
measures_overlap (dict): List of supply-side Measure objects and associated", "2.227001}, \"efficient\": {\"2009\": 1.113501, \"2010\": 1.113501}}, \"competed\": { \"baseline\": {\"2009\": 1.113501, \"2010\": 1.113501},", "\"efficient\": { \"2009\": numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}}, \"competed\":", "20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}},", "\"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\":", "\"2009\": numpy.array([ 26.04455, 27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}}, \"competed\": { \"baseline\":", "home', 'existing']\": { \"total\": { yr: 10 for yr in cls.handyvars.aeo_years}, \"total affected\":", "\"secondary\": None}, \"technology\": [\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009,", "19.2, 20.5]) }}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\":", "{\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": numpy.array([0, 1, 2]), \"2010\": numpy.array([0, 1,", "{ \"baseline\": {\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": {", "measure r5\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\": None},", "{ \"total\": { \"baseline\": {\"2009\": 16.04455, \"2010\": 16.04455}, \"efficient\": {\"2009\": 8.022273, \"2010\": 8.022273}},", "3])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10, \"2010\": numpy.array([16,", "numpy.pmt(0.25, 2, 1.44), \"rate 5\": numpy.pmt(0.15, 2, 1.625709), \"rate 6\": numpy.pmt(0.065, 2, 1.820626),", "{ \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": 0}}}, \"energy\":", "Check updated 
competed master microsegments for each sample measure # following competition/secondary microsegment", "30}}, \"competed\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}},", "\"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\":", "10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\":", "measures_demand (list): Demand-side subset of 'measures_all'. measures_supply (list): Supply-side subset of 'measures_all'. measures_overlap1", "{ \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -435, \"rate", "numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "measures_secondary (list): Subset of 'measures_all' with secondary microsegments to adjust. a_run (object): Analysis", "compete measure r3\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"single family home\"], \"end_use\": {\"primary\": [\"cooling\"], \"secondary\":", "{\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([8.89, 5.11, 9.99]), \"2010\": numpy.array([8.89, 5.11,", "\"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist2 = { \"stock\": { \"total\": { \"all\": {\"2009\":", "{\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_out_point_res =", "cashflows yield correct output payback values for idx, cf in enumerate(self.ok_cashflows): self.assertAlmostEqual(engine_instance.payback(cf), self.ok_out[idx],", "{\"2009\": 15, \"2010\": 25}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 10}, \"measure\": {\"2009\":", "\"rate 1\": -135, \"rate 2\": -140, \"rate 3\": -145, \"rate 4\": -150, \"rate", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 17.77300, \"2010\": 17.77300}, \"efficient\": {\"2009\": 8.886499, \"2010\":", "should be generated for each Measure object in 'measures_all' following competition 
and supply-demand", "\"contributing mseg keys and values\": { cls.overlap_key: { \"stock\": { \"total\": { \"all\":", "\"2010\": numpy.array([1.73, 0.02, 9.60])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {", "\"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\":", "'Max adoption potential' cls.ok_rate = 0.07 cls.ok_master_mseg_point = { \"stock\": { \"total\": {", "30, \"2010\": 30}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": {\"2009\": 15,", "cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure( cls.handyvars, **x)", "consumer # metrics consumer_metrics = [{ \"stock cost\": { \"residential\": { \"2009\": None,", "-4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}}, \"energy\": { \"savings (total)\": {\"2009\":", "0.004522088, 2.400830388])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}, { \"stock\": {", "numpy.array([-150, -200, -100]), \"2010\": numpy.array([-150, -200, -100])}, \"commercial\": { \"2009\": None, \"2010\": None}},", "{}, \"total\": {}}}, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure c3\",", "numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}}}, \"irr (w/ energy costs)\": {\"2009\": numpy.array([ 3.370236, 6.877566,", "cls.measures_all_dist = [run.Measure( cls.handyvars, **x) for x in [ copy.deepcopy(cls.compete_meas1), cls.compete_meas2_dist, copy.deepcopy(cls.compete_meas3)]] cls.measures_secondary_dist", "5), \"2010\": numpy.repeat(None, 5) }}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07,", "the operation of the 'metrics_update' function. 
Verify that cashflow inputs generate expected prioritization", "for yr in cls.handyvars.aeo_years}}, }} cls.compete_meas1 = { \"name\": \"sample compete measure r1\",", "\"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 1.73, \"2010\":", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\":", "values. compete_meas4 (dict): Sample residential supply-side cooling measure 2. compete_meas5 (dict): Sample residential", "numpy.array([8, 9, 9.1])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": numpy.array([8.0, 7.5, 6.5])},", "competing/interacting sample Measure objects with point value inputs. measures_demand (list): Demand-side subset of", "costs)\": { \"2009\": numpy.array([ 0.1937984, 0.1879699, 0.1748252, 0.2840909, 0.1724138]), \"2010\": numpy.array([ 0.2008032, 0.1901141,", "False}, { \"stock\": { \"cost savings (total)\": { \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2,", "\"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 34, \"2010\":", "8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {", "type(i2)) for x in range(0, len(i)): self.assertAlmostEqual(i[x], i2[x], places=2) # At the terminal/leaf", "-0.04694426]), \"2010\": numpy.array([ 0.05350000, 0.05350000, -0.01111353, -0.01111353, -0.04976366])}, \"cce (w/ carbon cost benefits)\":", "0.02890102]), \"2010\": numpy.array([ 3.340502, 14.65534, 0.02890102])}, \"efficient\": { \"2009\": numpy.array([ 2.227001, 10.25874, 0.02119408]),", "\"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400}}}, \"carbon cost\": { \"residential\":", "4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}, \"2010\": {", "= consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\":", "equality of two dicts. 
Args: dict1 (dict): First dictionary to be compared dict2", "\"efficient\": {\"2009\": 15, \"2010\": 15}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "\"2010\": numpy.array([5, 6, 7])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 10,", "22]), \"2010\": numpy.array([20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\":", "competition routine on sample demand-side measures self.a_run_dist.compete_res_primary( self.measures_demand_dist, self.adjust_key1, self.test_adopt_scheme) # Remove any", "2, 0.2009346)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\":", "1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist1 = { \"stock\": { \"total\": { \"all\":", "family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing')) cls.adjust_key2 = str( ('primary', 'AIA_CZ1',", "# Test that valid inputs yield correct anpv, irr, payback, and # cost", "share\": { \"original energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\": 0}}, \"original", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17.77,", "(list): List of all competing/interacting sample Measure objects with point value inputs. measures_demand", "generated given 'ok_master_mseg_point' with a residential sample measure. 
ok_out_dist1 (dict): Measure attribute update", "numpy.array([0.87, 0.01, 4.80])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 1.73179114, 0.01808835,", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07,", "0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 20, \"2010\": 20},", "value inputs.\"\"\" # Run measure competition routine on sample measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key,", "\"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": numpy.array([1.73,", "self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test output given residential measure with array inputs.\"\"\"", "\"adjusted energy (competed and captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\":", "numpy.array([16.04, 17.30, 10.29]), \"2010\": numpy.array([16.04, 17.30, 10.29])}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\":", "\"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\": {\"2009\": 10, \"2010\": 0}}}, \"energy\":", "{\"2009\": numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]), \"2010\": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}] cls.ok_out_dist4", "numpy.pmt(0.45, 2, 0.1896552), \"rate 4\": numpy.pmt(0.25, 2, 0.3), \"rate 5\": numpy.pmt(0.15, 2, 0.3695652),", "40, \"2010\": 40}, \"Cooling\": {\"2009\": 45, \"2010\": 45}}}} def test_ok(self): \"\"\"Test for correct", "value. Attributes: a_run (object): Sample analysis engine object. 
ok_total (dict): Sample unpartitioned measure", "1.73179114, 0.01808835, 9.60332155])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\":", "Output annuity equivalent Net Present Value dicts that should be generated given valid", "1}}] def test_compete_res(self): \"\"\"Test outcomes given valid sample measures w/ point value inputs.\"\"\"", "27.29736, 20.29000]), \"2010\": numpy.array([ 26.04455, 27.29736, 20.29000])}, \"efficient\": { \"2009\": numpy.array([ 19.53341, 20.47302,", "15, 9])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"cost\": { \"stock\":", "savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_point' with a", "2, 0.4389671), \"rate 7\": -0.25}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4318182), \"rate", "for the test run of the 'metric_update' # function function_output = engine_instance.metric_update( self.measure_list[0],", "14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}, \"competed\": { \"baseline\": {\"2009\":", "0.02]), \"2010\": numpy.array([2.23, 9.77, 0.02])}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\":", "84, 99]), \"2010\": numpy.array([114, 105, 89, 145, 96])}, \"cost savings (total)\": { \"2009\":", "\"savings (annual)\": { \"2009\": numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]), \"2010\": numpy.array([49.4, 41.3, 44.9,", "Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify test measure", "11}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = [{ \"stock\":", "\"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"cost\": { \"stock\":", "competition routine on sample measures 
self.a_run_dist.compete_com_primary( self.measures_all_dist, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment", "\"efficient\": {\"2009\": 10, \"2010\": 10}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}},", "cls.compete_meas2_dist = { \"name\": \"sample compete measure c2 dist\", \"climate_zone\": [\"AIA_CZ1\"], \"bldg_type\": [\"assembly\"],", "def setUpClass(cls): \"\"\"Define objects/variables for use across all class functions.\"\"\" base_dir = os.getcwd()", "numpy.array([ 22.22366, 22.68455, 20.10668])}, \"efficient\": { \"2009\": numpy.array([ 11.11183, 11.34227, 10.05334]), \"2010\": numpy.array([", "= [\"2009\", \"2010\"] cls.sample_measure_res = CommonTestMeasures().sample_measure4 cls.sample_measure_com = CommonTestMeasures().sample_measure5 cls.test_adopt_scheme = 'Max adoption", "2.227001, 9.770226, 0.01926735])}, \"efficient\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501,", "run.Measure(self.handyvars, **self.sample_measure) # Test for correct data types in measure markets attribute for", "{ \"total\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\": 5,", "\"2010\": 0.87}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {\"2009\":", "\"commercial\": { \"2009\": None, \"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": -200,", "[run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows = [[-10, 1, 1, 1, 1, 5, 7, 8], [-10,", "\"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"cost\":", "cls.ok_out_dist3 = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True,", "\"efficient\": { \"2009\": numpy.array([5, 6, 7]), \"2010\": numpy.array([5, 6, 7])}}, \"competed\": { \"baseline\":", "None}}}, { \"stock cost\": { 
\"residential\": { \"2009\": 120, \"2010\": 120}, \"commercial\": {", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 20,", "\"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\":", "yr in cls.handyvars.aeo_years}}, }} cls.compete_meas1 = { \"name\": \"sample compete measure r1\", \"climate_zone\":", "2, 1.356014)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 1.346974), numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07, 2, 1.202332),", "{ \"2009\": 46, \"2010\": numpy.array([44, 44, 42])}}, \"competed\": { \"baseline\": { \"2009\": 34.5,", "# Run secondary microsegment adjustments on sample measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme)", "17]), \"2010\": numpy.array([15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\": 10}, \"efficient\":", "None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"F32T8\"], \"secondary\": None}, \"markets\": {", "\"competed\": { \"baseline\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}, \"efficient\": { \"2009\":", "results data. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define objects/variables for use across all class", "costs)\": {\"2009\": numpy.array([ 0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.1133333, 0.08222222, 0.1488889,", "costs. 
ok_out_dicts (list): Output annuity equivalent Net Present Value dicts that should be", "sample residential supply-side cooling measure 1 including lists of stock cost input values", "11.34, 10.05]), \"2010\": numpy.array([11.11, 11.34, 10.05])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\":", "5}, \"measure\": {\"2009\": 1.11, \"2010\": 1.11}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 2.227001,", "\"rate 6\": numpy.pmt(0.065, 2, 1.36547), \"rate 7\": -0.75}}}}, \"irr (w/ energy costs)\": {", "5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 2,", "= cls.measures_all[2:5] cls.measures_overlap1 = { \"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home',", "1.73, \"2010\": 1.73}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": {\"2009\": 0.87,", "\"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300, 20.22977, 29.98073])}, \"efficient\": { \"2009\":", "'ASHP', 'existing'))]]} cls.measures_overlap2 = { \"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home',", ".30}, \"Cooling\": {\"2009\": .35, \"2010\": .35}}, \"Commercial\": { \"Heating\": {\"2009\": .40, \"2010\": .40},", "test_attributes(self): \"\"\"Compare object attributes to keys from input dict.\"\"\" for key in self.sample_measure.keys():", "\"total affected\": { yr: 5 for yr in cls.handyvars.aeo_years}, \"affected savings\": { yr:", "portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist3[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[", "20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60,", "\"2009\": numpy.array([ 13.88650, 10.11489, 14.99037]), \"2010\": numpy.array([ 13.88650, 10.11489, 
14.99037])}, \"efficient\": { \"2009\":", "{ \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([50.6, 57.7, 58.1, 50,", "and carbon costs)\": {\"2009\": numpy.array([ 4.442382, 8.824726, 5.647891, 5.501689, 4.082098]), \"2010\": numpy.array([ 8.446248,", "\"2010\": 10}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\":", "\"baseline\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}, \"efficient\": { \"2009\": 0, \"2010\":", "'uncompeted' # market ('ok_master_mseg_dist3'), the focus of this test suite test_meas = run.Measure(self.handyvars,", "\"supply-demand adjustment\": { \"savings\": {}, \"total\": {}}}, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\":", "{ \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}}, \"lifetime\": {", "(w/ energy costs)\": { \"2009\": numpy.array([ 0.2392344, 0.2347418, 0.2242152, 0.2659574, 0.2857143]), \"2010\": numpy.array([", "that the dicts from the current keys are equal self.assertCountEqual(i, i2) # Continue", "{ \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\":", "measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist2[1]) # Verify test measure portfolio-level financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[", "{ \"2009\": 25.5, \"2010\": numpy.array([18.0, 19.5, 24.0])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0,", "(total)\": { \"2009\": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]), \"2010\": numpy.array([19.9, 21.3, 18.3, 18.8,", "\"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\":", "150}, \"efficient\": {\"2009\": 0, \"2010\": 50}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200,", 
"test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist3 # Create Engine instance using test measure, run function", "1\": -350, \"rate 2\": -60, \"rate 3\": -70, \"rate 4\": -380, \"rate 5\":", "-5.525120e-08])}, \"ccc (w/ energy cost benefits)\": { \"2009\": numpy.array([ -3.028667e-08, -4.740667e-08, -8.600937e-08, -8.564064e-08,", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 16.04455, 17.29736, 10.29000]), \"2010\": numpy.array([ 16.04455,", "measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample measure", "self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed master microsegments for each", "5\": 105, \"rate 6\": 110, \"rate 7\": 115}, \"2010\": { \"rate 1\": 85,", "{\"2009\": -0.01602415, \"2010\": -0.01111353}, \"cce (w/ carbon cost benefits)\": { \"2009\": -0.04935749, \"2010\":", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([8.0, 7.5, 6.5])}}},", "{ \"2009\": { \"rate 1\": -90, \"rate 2\": -95, \"rate 3\": -100, \"rate", "family home', 'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))]]} cls.measures_overlap2 = { \"measures\": cls.measures_all[0:2],", "None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": numpy.pmt(10.0, 2, 0.09917355), \"rate", "numpy.pmt(0.15, 2, 1.625709), \"rate 6\": numpy.pmt(0.065, 2, 1.820626), \"rate 7\": -1}, \"2010\": {", "in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test outcomes given valid sample", "terminal/leaf node if isinstance(i, dict): # Test that the dicts from the current", "2, 0.9040091), numpy.pmt(0.07, 2, 0.9040091)]), \"2010\": 
numpy.array([ numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014),", "-0.08611353, -0.1247637])}, \"ccc\": { \"2009\": numpy.array([ 3.566667e-08, 3.566667e-08, -1.602415e-08, -1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([", "= 1 cls.ok_out_array = [ numpy.pmt(0.07, 6, -0.1837021), numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6,", "enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) class NumpyConversionTest(unittest.TestCase, CommonMethods): \"\"\"Test the operation of the", "numpy.pmt(0.07, 2, 1.346974), numpy.pmt(0.07, 2, 1.473535), numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07,", "1.808018), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None, \"2010\": None}}, \"carbon cost\": {", "'electricity (grid)', 'cooling', 'supply', 'ASHP', 'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',", "{ \"2009\": numpy.array([ 2.227001, 9.770226, 0.01926735]), \"2010\": numpy.array([ 2.227001, 9.770226, 0.01926735])}, \"efficient\": {", "cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist) # Set information needed to finalize array test measure", "34, \"2010\": numpy.array([24, 26, 32])}, \"efficient\": { \"2009\": 25.5, \"2010\": numpy.array([18, 19.5, 24])}},", "that should be generated given valid sample inputs. \"\"\" @classmethod def setUpClass(cls): \"\"\"Define", "\"rate 1\": -190, \"rate 2\": -195, \"rate 3\": -190, \"rate 4\": -205, \"rate", "microsegments to adjust. a_run (object): Analysis engine object incorporating all 'measures_primary' objects. 
measures_all_dist", "10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\": {\"2009\": 50,", "supply-side cooling measure 1 including lists of stock cost input values instead of", "0.01637724])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 1.113501, 4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501,", "def test_metrics_ok_distrib2(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize test", "\"competed\": { \"baseline\": { \"2009\": numpy.array([ 21.11183, 21.34227, 20.05334]), \"2010\": numpy.array([ 21.11183, 21.34227,", "{ \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -50, \"2010\":", "20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]), \"2010\": numpy.array( [15, 16, 17])}}, \"competed\":", "\"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -150, \"2010\": -50}, \"commercial\": {", "cooling measure 3. measures_all (list): List of all competing/interacting sample Measure objects with", "numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]), \"2010\": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}] cls.ok_out_dist4 =", "15, \"2010\": 15}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 0,", "{ \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]), \"2010\": numpy.array([ 27.77300,", "-70, \"rate 4\": -380, \"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400}}},", "15, \"2010\": 25}}, \"competed\": { \"all\": {\"2009\": 5, \"2010\": 10}, \"measure\": {\"2009\": 5,", "(w/ energy costs)\": { \"2009\": numpy.array([ 3.648926, 3.737086, 3.956335, 3.180956, 2.886001]), \"2010\": numpy.array([", "[\"ASHP\"], \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"],", "{ \"2009\": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, 
-3.7, -6.7, -4.2, -5.5])}},", "0, \"2010\": 50}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 200, \"2010\": 300}, \"efficient\":", "\"cost savings (annual)\": {\"2009\": 5, \"2010\": 15}}}, { \"cce\": { \"2009\": numpy.array([ 0.03566667,", "{ \"2009\": numpy.array([ -3.10e-08, -3.10e-08, -8.269082e-08, -8.269082e-08, -1.136109e-07]), \"2010\": numpy.array([ -2.15e-08, -2.15e-08, -8.611353e-08,", "\"2010\": 20}, \"efficient\": {\"2009\": 10, \"2010\": 10}}, \"competed\": { \"baseline\": {\"2009\": 10, \"2010\":", "{ \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"energy\": { \"total\": { \"baseline\": {", "numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}, \"cost savings (annual)\": { \"2009\": numpy.array([4.9, 5.3, 6.3,", "\"efficient\": { \"2009\": numpy.array([ 6.943250, 5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}}, \"lifetime\":", "\"total\": { \"baseline\": {\"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": {\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\":", "of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist4 #", "financial metrics that should be generated given 'ok_master_mseg_dist3' with a residential sample measure.", "\"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\": -50, \"2010\": -50}, \"commercial\": {", "\"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": {", "{ \"baseline\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])},", "1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}, str(('primary', 'AIA_CZ2', 'multi family home',", "\"2010\": 15}}, \"Commercial\": { \"Heating\": {\"2009\": 20, \"2010\": 20}, \"Cooling\": {\"2009\": 25, \"2010\":", "test_metrics_ok_distrib3(self): \"\"\"Test output 
given residential measure with array inputs.\"\"\" # Initialize test measure", "6.722325]), \"2010\": numpy.array([ 0.865895571, 0.01085301, 6.722325])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571,", "150, \"rate 7\": 160}, \"2010\": { \"rate 1\": 100, \"rate 2\": 110, \"rate", "\"total\": {}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\":", "5, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 10,", "0.432947785, \"2010\": 0.432947785}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": 2.59768671, \"2010\": 2.59768671},", "\"baseline\": {\"2009\": 26.04455, \"2010\": 26.04455}, \"efficient\": {\"2009\": 19.53341, \"2010\": 19.53341}}, \"competed\": { \"baseline\":", "7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]),", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\":", "4\": 100, \"rate 5\": 105, \"rate 6\": 110, \"rate 7\": 115}, { \"rate", "measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist2[3]) def test_metrics_ok_distrib3(self): \"\"\"Test output given residential measure", "0, \"2010\": 20}}, \"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0,", "15, \"2010\": 15}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\":", "demand-side Measure objects and associated contributing microsegment keys that overlap with 'measures_supply_dist' Measure", "\"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"single family home\"], \"fuel_type\": {\"primary\": [\"electricity", "-6.7, -4.2, -5.5])}}, \"energy\": { \"savings (total)\": {\"2009\": 150, \"2010\": 200}, \"savings (annual)\":", "{ \"baseline\": {\"2009\": 
3.340502, \"2010\": 3.340502}, \"efficient\": {\"2009\": 2.227001, \"2010\": 2.227001}}, \"competed\": {", "-160, \"rate 7\": -370}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None},", "\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": -90, \"rate 2\":", "Useful variables across the class. sample_measure (object): Sample measure data with lists to", "cls.ok_total = {\"2009\": 100, \"2010\": 100} cls.ok_partitions = { \"AIA CZ1\": { \"Residential\":", "6.511136, \"2010\": 6.511136}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": {\"2009\": 16.04455, \"2010\":", "\"efficient\": {\"2009\": 8.022273, \"2010\": 8.022273}}, \"competed\": { \"baseline\": {\"2009\": 8.022273, \"2010\": 8.022273}, \"efficient\":", "objects. measures_all_dist (list): List of competing measures including some measures with array inputs.", "x in range(0, len(i)): self.assertAlmostEqual(i[x], i2[x], places=2) # At the terminal/leaf node, formatted", "\"carbon cost\": { \"residential\": { \"2009\": -100, \"2010\": -100}, \"commercial\": { \"2009\": None,", "\"2009\": numpy.array([11.11, 11.34, 10.05]), \"2010\": numpy.array([11.11, 11.34, 10.05])}}}, \"energy\": { \"total\": { \"baseline\":", "\"all\": {\"2009\": 5, \"2010\": 5}, \"measure\": { \"2009\": numpy.array([0.87, 0.01, 4.80]), \"2010\": numpy.array([0.87,", "10, \"2010\": numpy.array([0, 2, 4])}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 40, \"2010\":", "\"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2,", "sample measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments on sample", "0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}, cls.overlap_key_scnd: {", "0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, 
\"measure\":", "2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2, 0.4345794), numpy.pmt(0.07, 2,", "numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088, 2.400830388])}}}, \"carbon\": { \"total\": {", "3\": numpy.pmt(0.45, 2, 0.5826397), \"rate 4\": numpy.pmt(0.25, 2, 0.72), \"rate 5\": numpy.pmt(0.15, 2,", "30}, \"efficient\": { \"2009\": 20, \"2010\": 20}}, \"competed\": { \"baseline\": { \"2009\": 15,", "self.sample_measure2 = { \"name\": \"sample measure 2\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None,", "}, \"demand\": { \"['AIA_CZ1', 'single family home', 'existing']\": { \"total\": { yr: 10", "\"efficient\": { \"2009\": numpy.array([ 0.5567503, 2.931068, 0.006743571]), \"2010\": numpy.array([ 0.5567503, 2.931068, 0.006743571])}}}, \"carbon\":", "'ASHP', 'existing'))]]} cls.measures_overlap2_dist = { \"measures\": cls.measures_all_dist[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home',", "{\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": {", "39.06682}, \"efficient\": {\"2009\": 26.04455, \"2010\": 26.04455}}, \"competed\": { \"baseline\": {\"2009\": 19.53341, \"2010\": 19.53341},", "{ \"total\": { \"baseline\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}, \"efficient\": {", "\"2009\": -8.269082e-08, \"2010\": -8.611353e-08}}, { \"anpv\": { \"stock cost\": { \"residential\": { \"2009\":", "and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist1'), the focus of this", "\"baseline\": {\"2009\": 42.22366, \"2010\": 42.22366}, \"efficient\": {\"2009\": 31.66775, \"2010\": 31.66775}}, \"competed\": { \"baseline\":", "numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 2, 1.356014), numpy.pmt(0.07, 5, 3.075148)])}, \"commercial\": { \"2009\": numpy.repeat(None,", "{\"2009\": 1, \"2010\": 1}, \"measure\": 1}}] cls.measures_master_msegs_out_dist = 
[{ \"stock\": { \"total\": {", "# Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) # Verify test", "{ \"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07,", "[\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\",", "to finalize array test measure consumer # metrics consumer_metrics_dist = [{ \"stock cost\":", "run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.handyvars.retro_rate = 0 cls.test_adopt_scheme = \"Max adoption", "{ \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": {", "measure. ok_out_point_com (dict): Measure attribute update status, savings, and portfolio/consumer-level financial metrics that", "savings (total)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3,", "\"2009\": numpy.array([ numpy.pmt(0.07, 2, 0.4245794), numpy.pmt(0.07, 2, 0.6645794), numpy.pmt(0.07, 2, 0.5245794), numpy.pmt(0.07, 2,", "self.ok_ecostsave, self.ok_csave, self.ok_ccostsave) # Test that valid inputs yield correct anpv, irr, payback,", "\"competed\": { \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": { \"2009\": 0, \"2010\": numpy.array([8.0,", "\"market_exit_year\": None, \"markets\": { \"Technical potential\": { \"key 1\": { \"nested key 1\":", "0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 27.77300, 20.22977, 29.98073]),", "0, \"2010\": 0}}, \"total\": { cls.adjust_key2: { \"2009\": 100, \"2010\": 100}}}}, \"mseg_out_break\": {}},", "27.29736, 20.29000])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 19.53341, 20.47302, 15.21750]), \"2010\": numpy.array([", "\"commercial\": { \"2009\": None, \"2010\": 
None}}, \"energy cost\": { \"residential\": { \"2009\": -150,", "4\": numpy.pmt(0.25, 2, 1.08), \"rate 5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065, 2,", "9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 0.865895571, 0.01085301, 6.722325]), \"2010\": numpy.array([ 0.865895571, 0.01085301, 6.722325])}},", "potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10},", "1, -0.255), numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07, 2, 0.3659346), numpy.pmt(0.07, 2, 0.4909346), numpy.pmt(0.07, 5,", "test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist1[0]) # Verify test measure savings", "{\"primary\": [\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"general service", "5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 1,", "{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\":", "2, 0.01724138), \"rate 4\": numpy.pmt(0.25, 2, 0.1), \"rate 5\": numpy.pmt(0.15, 2, 0.1521739), \"rate", "\"rate 7\": -75}}}}, { \"stock cost\": { \"residential\": { \"2009\": None, \"2010\": None", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 34, \"2010\": 24}, \"efficient\": {\"2009\": 25.5, \"2010\":", "in self.handyvars.adopt_schemes: for comp_scheme in [\"uncompeted\", \"competed\"]: tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x,", "15}, \"efficient\": {\"2009\": 15, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\":", "{ \"2009\": numpy.array([ 10.55592, 10.67114, 10.02667]), \"2010\": numpy.array([ 10.55592, 10.67114, 10.02667])}}}, \"carbon\": {", "# Set information needed to finalize array test measure consumer # metrics consumer_metrics", "\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 
2}} cls.ok_master_mseg_dist2 = { \"stock\": { \"total\":", "\"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}, \"cost\": { \"stock\": { \"total\": {", "6\": -70, \"rate 7\": -75}, \"2010\": { \"rate 1\": -40, \"rate 2\": -50,", "numpy.array([12, 13, 16])}, \"efficient\": { \"2009\": 8.5, \"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"carbon\": {", "# keys for measure markets/savings/portfolio metrics for adopt_scheme in self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted(", "12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}}, \"energy\": {", "[15.1, 12.7, 14.1, 14.2, 15.5]), \"2010\": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5]) }}, \"competed\":", "5.394281]), \"2010\": numpy.array([ 4.601286, 4.897553, 4.260683, 4.367373, 4.089454])}, \"payback (w/ energy costs)\": {", "captured)\": {}}}}, \"mseg_out_break\": {}}}} cls.compete_meas5 = { \"name\": \"sample compete measure r5\", \"climate_zone\":", "0.1, 0.1, 0.1, 0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,", "First sample string for competed demand-side and supply-side market microsegment key chain being", "cls.ok_master_mseg_dist4 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20}, \"measure\":", "48.9]), \"2010\": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])}, \"cost savings (total)\": { \"2009\": numpy.array([4.9,", "1}, \"measure\": 1}}, { \"stock\": { \"total\": { \"all\": { \"2009\": 30, \"2010\":", "7.5, 6.5])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": 0, \"2010\": numpy.array([24, 20,", "}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2, 1.97074), numpy.pmt(0.07, 2,", "numpy.pmt(0.07, 6, 4.76654), None, None, None, 0.62, 1.59, 2, 0.67, 0.005, -0.13, 7.7e-10,", "overlap adjustments. 
measure_master_msegs_out_dist (dict): Master market microsegments that should be generated for each", "23, \"2010\": numpy.array([22, 22, 21])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\":", "2. compete_meas5 (dict): Sample residential supply-side cooling measure 3. measures_all (list): List of", "numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\":", "numpy.array([5, 6, 7]), \"2010\": numpy.array( [5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\":", "self.attribute_dict[key], self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods): \"\"\"Test operation of 'out_break_walk' function. Verify that function", "primary market microsegments. Attributes: handyvars (object): Useful variables across the class. test_adopt_scheme (string):", "numpy.array([24, 26, 32])}, \"efficient\": { \"2009\": 25.5, \"2010\": numpy.array([18, 19.5, 24])}}, \"competed\": {", "2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5),", "\"rate 5\": -390, \"rate 6\": -150, \"rate 7\": -400}, \"2010\": { \"rate 1\":", "verify correct adoption/competition scenario # keys for measure markets/savings/portfolio metrics for adopt_scheme in", "contributing microsegment keys that overlap with 'measures_supply_dist' Measure objects. 
a_run_dist (object): Engine object", "cls.adjust_key2: { \"b1\": {\"2009\": -0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary", "\"baseline\": { \"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": 15, \"2010\": 15}}, \"competed\":", "numpy.array([ 6.511136, 6.824341, 5.072499])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "# Offer external code execution (include all lines below this point in all", "1}, \"measure\": 1}, \"sub-market scaling\": 1}, \"competed choice parameters\": { cls.adjust_key2: { \"b1\":", "numpy.array([ 42.22366, 42.68455, 40.10668])}, \"efficient\": { \"2009\": numpy.array([ 31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([", "5), \"2010\": numpy.repeat(None, 5)}}, \"energy cost\": { \"residential\": { \"2009\": numpy.array([ numpy.pmt(0.07, 2,", "{ \"baseline\": { \"2009\": numpy.array([ 39.06682, 40.94604, 30.43499]), \"2010\": numpy.array([ 39.06682, 40.94604, 30.43499])},", "energy costs)\": {\"2009\": numpy.array([ 0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([ 0.1700000, 0.1233333,", "2, 1.473535), numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07, 2, 1.130011)]) }, \"commercial\":", "[\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"F32T8\"], \"secondary\": None},", "results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_point_res[0]) # Verify test measure savings self.dict_check(engine_instance.measures[0].savings[ self.test_adopt_scheme][\"uncompeted\"],", "energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} class CommonMethods(object): \"\"\"Define common methods", "25, \"2010\": 25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist1", "(int): Sample number of competed units. 
ok_base_life (int): Sample baseline technology lifetime. ok_product_lifetime", "{ \"baseline\": {\"2009\": 40, \"2010\": 40}, \"efficient\": {\"2009\": 40, \"2010\": 30}}, \"competed\": {", "financial metrics self.dict_check(engine_instance.measures[0].portfolio_metrics[ self.test_adopt_scheme][\"uncompeted\"], self.ok_out_dist1[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics,", "11, 10.5])}}, \"competed\": { \"baseline\": { \"2009\": 11.5, \"2010\": numpy.array([11.0, 11.0, 10.5])}, \"efficient\":", "{\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\": 20.82975, \"2010\": 20.82975}}, \"competed\": { \"baseline\": {\"2009\":", "{ \"total\": { \"baseline\": { \"2009\": 2.59768671, \"2010\": 2.59768671}, \"efficient\": { \"2009\": 1.73179114,", "\"2009\": numpy.array([ 0, 0.001808835, 1.920664]), \"2010\": numpy.array([ 0, 0.001808835, 1.920664])}}}, \"energy\": { \"total\":", "{\"2009\": 1, \"2010\": 1}, \"measure\": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}} cls.ok_out_point_res = [{", "base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.handyvars.retro_rate =", "-1.602415e-08, -4.694426e-08]), \"2010\": numpy.array([ 5.350000e-08, 5.350000e-08, -1.111353e-08, -1.111353e-08, -4.976366e-08])}, \"ccc (w/ energy cost", "numpy.pmt(0.07, 5, 2.887211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 1, -0.5), numpy.pmt(0.07, 2,", "following competition/supply-demand overlap adjustments for ind, d in enumerate(self.a_run_dist.measures): self.dict_check( self.measures_master_msegs_out_dist[ind], self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"])", "numpy.pmt(0.065, 2, 0.4389671), \"rate 7\": -0.25}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, -0.4318182),", "{ \"rate 1\": 85, \"rate 2\": 90, \"rate 3\": 95, \"rate 4\": 100,", "{ \"2009\": 
numpy.array([ 13.88650, 10.11489, 14.99037]), \"2010\": numpy.array([ 13.88650, 10.11489, 14.99037])}, \"efficient\": {", "numpy.array([ 0.865895571, 0.01085301, 6.722325]), \"2010\": numpy.array([ 0.865895571, 0.01085301, 6.722325])}}, \"competed\": { \"baseline\": {", "(annual)\": {\"2009\": 50, \"2010\": 50}, \"cost savings (total)\": {\"2009\": 5, \"2010\": 15}, \"cost", "\"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 8.886499, 5.114887,", "home\"], \"fuel_type\": {\"primary\": [\"electricity (grid)\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"lighting\"], \"secondary\":", "that measure attributes are correctly initiated. Attributes: sample_measure (object): Residential sample measure object.", "-2.7, -4.1, -4.2, -5.5]), \"2010\": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}}, \"energy\": { \"savings", "{ \"baseline\": {\"2009\": 100, \"2010\": 150}, \"efficient\": { \"2009\": numpy.array([6, 7, 1, 16,", "2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2,", "numpy.array([ 19.53341, 20.47302, 15.21750])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 13.02227, 13.64868, 10.14500]),", "\"2010\": numpy.array([ 0.2008032, 0.1901141, 0.2145923, 0.2100840, 0.2222222])}}] cls.ok_out_dist2 = [{ \"savings and portfolio", "0.1833333])}, \"payback (w/ energy and carbon costs)\": {\"2009\": numpy.array([ 0.34, 0.1800000, 0.1640000, 0.16800000,", "\"2010\": 5}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0,", "-0.95, \"2010\": -0.95}, \"b2\": {\"2009\": -0.10, \"2010\": -0.10}}}, \"secondary mseg adjustments\": { \"market", "{}, \"adjusted energy (competed and captured)\": {} }}}, \"mseg_out_break\": {}}}} self.sample_measure4 = {", "\"2010\"], \"markets\": { \"Technical potential\": { \"master_mseg\": { \"stock\": { \"total\": { \"all\":", 
"\"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": {\"2009\": 20, \"2010\": 10}}, \"competed\": { \"baseline\":", "\"total\": { \"baseline\": { \"2009\": 23, \"2010\": numpy.array([22, 22, 21])}, \"efficient\": { \"2009\":", "and carbon costs)\": { \"2009\": 4.54, \"2010\": 4.09}, \"payback (w/ energy costs)\": {", "Run measure competition routine on sample measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme) # Run", "(annual)\": { \"2009\": numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]), \"2010\": numpy.array([49.4, 41.3, 44.9, 45.0,", "x in [ cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2), cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4), copy.deepcopy(cls.compete_meas5)]] cls.measures_demand_dist = cls.measures_all_dist[0:2] cls.measures_supply_dist =", "\"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 0, \"2010\": 10}}, \"competed\": { \"all\":", "\"sample measure 2\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None, \"market_scaling_fractions_source\": None,", "21, 22]), \"2010\": numpy.array([20, 21, 22])}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15},", "\"2010\": 1}, \"measure\": 1}}, \"mseg_adjust\": { \"contributing mseg keys and values\": { cls.adjust_key1:", "results partitioning fraction. ok_out (dict): Sample partitioned measure results data. 
\"\"\" @classmethod def", "tuple to be of comparable structure # to the normal output from zip_longest()", "{ \"baseline\": {\"2009\": 20, \"2010\": 35}, \"efficient\": {\"2009\": 10, \"2010\": 20}}}, \"carbon\": {", "\"residential\": { \"2009\": numpy.pmt(0.07, 2, 1.808018), \"2010\": numpy.pmt(0.07, 2, 1.356014)}, \"commercial\": {\"2009\": None,", "5\": -65, \"rate 6\": -70, \"rate 7\": -75}, \"2010\": { \"rate 1\": -40,", "2\": numpy.pmt(1.0, 2, 0.5625), \"rate 3\": numpy.pmt(0.45, 2, 0.8739596), \"rate 4\": numpy.pmt(0.25, 2,", "numpy.array( [5, 6, 7])}}, \"competed\": { \"baseline\": { \"2009\": 5, \"2010\": 5}, \"efficient\":", "0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_savings_mkts_comp_schemes = [\"competed\", \"uncompeted\"] def test_metrics_ok_point_res(self): \"\"\"Test output", "\"2010\": numpy.array([5, 6, 7])}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30},", "\"2009\": numpy.array([ 6.511136, 6.824341, 5.072499]), \"2010\": numpy.array([ 6.511136, 6.824341, 5.072499])}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "numpy.pmt(0.07, 5, 4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 2,", "[\"ASHP\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2009\", \"2010\"],", "including competing/interacting sample Measure objects with array inputs. 
measures_demand_dist (list): Demand-side subset of", "0.006743571])}}}, \"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 3.340502, 14.65534, 0.02890102]), \"2010\":", "\"baseline\": {\"2009\": 0, \"2010\": 24}, \"efficient\": {\"2009\": 0, \"2010\": 18}}, \"competed\": { \"baseline\":", "{ \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 20, \"2010\": 20}},", "{ cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure( cls.handyvars,", "of identical size, # zip_longest() will use the fill value created below as", "\"baseline\": {\"2009\": 0, \"2010\": 18}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}}, \"lifetime\": {\"baseline\": {\"2009\":", "\"rate 5\": 90, \"rate 6\": 100, \"rate 7\": 110}, \"2010\": { \"rate 1\":", "17, \"2010\": 12}, \"efficient\": {\"2009\": 8.5, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\":", "use across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure", "recursive # exploration of dict1 and dict2, respectively for (k, i), (k2, i2)", "\"commercial\": {\"2009\": None, \"2010\": None}}}, \"irr (w/ energy costs)\": { \"2009\": 3.45, \"2010\":", "consumer_metrics_final = [{ \"stock cost\": { \"residential\": { \"2009\": 95, \"2010\": 95}, \"commercial\":", "-150, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\": {", "42.68455, 40.10668]), \"2010\": numpy.array([ 42.22366, 42.68455, 40.10668])}, \"efficient\": { \"2009\": numpy.array([ 31.66775, 32.01341,", "{\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": 17, \"2010\": numpy.array([12, 13, 16])}}, \"competed\":", "\"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\":", "cls.measures_overlap1_dist = { \"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 
'electricity (grid)',", "25}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist3 = {", "test_adopt_scheme (string): Sample consumer adoption scheme. test_htcl_adj (dict): Sample dict with supply-demand overlap", "\"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, \"competed choice parameters\": { cls.overlap_key: {", "lifetime. ok_product_lifetime (float): Sample measure lifetime. ok_life_ratio (int): Sample measure->baseline lifetime ratio. ok_base_scost", "test_metrics_ok_distrib2(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize test measure", "(object): Useful variables across the class. measure_list (list): List for Engine including one", "numpy.pmt(0.25, 2, 0.1), \"rate 5\": numpy.pmt(0.15, 2, 0.1521739), \"rate 6\": numpy.pmt(0.065, 2, 0.2042254),", "{ \"measures\": cls.measures_all[0:2], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand',", "\"2010\": None}}, \"energy cost\": { \"residential\": { \"2009\": numpy.pmt(0.07, 2, 1.808018), \"2010\": numpy.pmt(0.07,", "with 'measures_demand_dist' Measure objects. measures_overlap2_dist (dict): List of demand-side Measure objects and associated", "11.34227, 10.05334])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}}, \"energy\":", "\"2010\": 6.511136}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 39.06682, \"2010\": 39.06682}, \"efficient\": {\"2009\":", "{ \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 100, \"rate", "\"payback (w/ energy costs)\": {\"2009\": numpy.array([ 0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]), \"2010\": numpy.array([", "1\": [0.5, 0.2, 0.3, 0.4, 0.5], \"nested key 2\": 2}, \"key 2\": 5.8}}}", "else: self.assertEqual(function_output[ind], x) class PaybackTest(unittest.TestCase): \"\"\"Test the operation of the 'payback' function. 
Verify", "handyvars (object): Useful variables across the class. test_adopt_scheme (string): Sample consumer adoption scheme.", "numpy.pmt(0.07, 2, 1.808018), numpy.pmt(0.07, 5, 4.100197)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, 0.7009346), numpy.pmt(0.07, 1,", "'existing'))], [str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)', 'cooling', 'demand', 'windows', 'existing'))]]} cls.a_run_dist", "[\"reflector (LED)\"], \"technology_type\": { \"primary\": \"supply\", \"secondary\": \"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\": None, \"yrs_on_mkt\":", "\"secondary\": \"demand\"}, \"market_entry_year\": 2010, \"market_exit_year\": None, \"yrs_on_mkt\": [\"2010\"], \"markets\": { \"Technical potential\": {", "type, structure type). compete_meas1 (dict): Sample commercial supply-side lighting measure 1. compete_meas2 (dict):", "1.59, 2, 0.67, 0.005, -0.13, 7.7e-10, -9.2e-9] def test_metric_updates(self): \"\"\"Test for correct outputs", "\"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": {\"2009\": 10, \"2010\":", "{ \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 5, \"2010\": 5}}}}, \"lifetime\": {\"baseline\":", "2, 1.820626), \"rate 7\": -1}, \"2010\": { \"rate 1\": numpy.pmt(10.0, 2, 0.07438017), \"rate", "\"2009\": numpy.array([17.77, 10.23, 19.98]), \"2010\": numpy.array([17.77, 10.23, 19.98])}}, \"competed\": { \"all\": {\"2009\": 10,", "{ \"name\": \"sample measure 4\", \"active\": 1, \"market_entry_year\": None, \"market_exit_year\": None, \"market_scaling_fractions\": None,", "cost\": { \"residential\": { \"2009\": -150, \"2010\": -150}, \"commercial\": { \"2009\": None, \"2010\":", "\"total\": { \"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": { \"2009\": numpy.array([17.77, 10.23, 19.98]),", "0.009044176, 4.801660776])}, \"efficient\": { \"2009\": numpy.array([ 0.432947785, 0.004522088, 2.400830388]), \"2010\": numpy.array([ 0.432947785, 0.004522088,", 
"1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\": numpy.array([0, 0, 0])}}},", "10}, \"measure\": {\"2009\": 5, \"2010\": 10}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 200,", "11.11183}, \"efficient\": {\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 42.22366,", "supply and demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj)", "2, 2.043061), numpy.pmt(0.07, 2, 2.223862), numpy.pmt(0.07, 2, 1.591056), numpy.pmt(0.07, 2, 1.356014)]), \"2010\": numpy.array([", "20}}, \"competed\": { \"baseline\": {\"2009\": 15, \"2010\": 15}, \"efficient\": {\"2009\": 15, \"2010\": 5}}},", "\"efficient\": {\"2009\": 10, \"2010\": 5}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 30, \"2010\":", "\"efficient\": { \"2009\": numpy.array([ 20.82975, 15.17233, 22.48555]), \"2010\": numpy.array([ 20.82975, 15.17233, 22.48555])}}, \"competed\":", "-0.04751385]), \"2010\": numpy.array([ -0.09966428, -0.10353592, -0.09523954, -0.10215319, -0.09855809])}, \"ccc\": { \"2009\": numpy.array([ -1.565543e-08,", "20, \"2010\": 35}, \"efficient\": { \"2009\": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]), \"2010\": numpy.array(", "self.test_adopt_scheme][\"uncompeted\"], self.ok_out_point_res[2]) # Verify test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_point_res[3]) def test_metrics_ok_point_com(self):", "Set information needed to finalize point value test measure # consumer metrics consumer_metrics_final", "captured)\": {}}} }, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure r3\",", "captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": {", "test measure consumer-level metrics self.dict_check(engine_instance.measures[ 0].consumer_metrics, self.ok_out_dist3[3]) def 
test_metrics_ok_distrib4(self): \"\"\"Test output given residential", "and captured)\": {}}} }, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\":", "Verify test measure results update status self.dict_check(engine_instance.measures[ 0].update_results, self.ok_out_dist4[0]) # Verify test measure", "60}, \"efficient\": {\"2009\": 60, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\": 30, \"2010\": 30},", "7.20249116])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176, 4.801660776]), \"2010\": numpy.array([ 0.865895571,", "\"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15, 16, 17]),", "measure data. test_adopt_scheme (string): Sample consumer adoption scheme. ok_rate (float): Sample discount rate.", "demand-side cooling measure 1. compete_meas1_dist (dict): Alternative version of sample residential demand-side cooling", "{ \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market scaling\": 1}}, str(('primary', 'AIA_CZ2',", "consumer metrics for ind, m in enumerate(cls.a_run.measures): m.consumer_metrics['anpv'] = consumer_metrics[ind] cls.measures_all_dist = [run.Measure(", "[\"lighting\"], \"secondary\": None}, \"technology_type\": {\"primary\": \"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"general service (CFL)\"],", "\"supply-demand adjustment\": { \"savings\": { cls.adjust_key2: { \"2009\": 0, \"2010\": 0}}, \"total\": {", "cls.ok_out_dist4 = [{ \"savings and portfolio metrics\": { \"Technical potential\": { \"uncompeted\": True,", "-0.04613129]), \"2010\": numpy.array([ 0.027285, 0.019795, -0.02023954, -0.02715319, -0.05525120])}, \"cce (w/ carbon cost benefits)\":", "\"2010\": numpy.array([-50, -100, -10])}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\":", "12}, \"efficient\": {\"2009\": 0, \"2010\": 6}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 0,", 
"-1.602415e-08, \"2010\": -1.111353e-08}, \"ccc (w/ energy cost benefits)\": { \"2009\": -8.269082e-08, \"2010\": -8.611353e-08}},", "{ \"all\": {\"2009\": 10, \"2010\": 10}, \"measure\": {\"2009\": 1.73, \"2010\": 1.73}}, \"competed\": {", "\"measure\": {\"2009\": 0, \"2010\": 5}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\":", "cooling measure 2. compete_meas5 (dict): Sample residential supply-side cooling measure 3. measures_all (list):", "\"stock\": { \"total\": { \"baseline\": {\"2009\": 2.227001, \"2010\": 2.227001}, \"efficient\": {\"2009\": 1.113501, \"2010\":", "**self.sample_measure_res) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_dist2 # Create Engine instance using test measure, run", "11.11183}}, \"competed\": { \"baseline\": {\"2009\": 11.11183, \"2010\": 11.11183}, \"efficient\": {\"2009\": 0, \"2010\": 0}}},", "numpy.array([ 8.886499, 5.114887, 9.990366]), \"2010\": numpy.array([ 8.886499, 5.114887, 9.990366])}}, \"competed\": { \"baseline\": {", "(annual)\": { \"2009\": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]), \"2010\": numpy.array([14.9, 16.3, 13.3, 13.8,", "{ \"2009\": 1.73179114, \"2010\": 1.73179114}, \"efficient\": { \"2009\": 0.865895571, \"2010\": 0.865895571}}, \"competed\": {", "0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]), \"2010\": numpy.array([ 0.1133333, 0.08222222, 0.1488889, 0.09333333, 0.1222222])}}] cls.ok_out_dist3", "{\"2009\": 20.82975, \"2010\": 20.82975}, \"efficient\": {\"2009\": 6.943250, \"2010\": 6.943250}}}, \"cost\": { \"stock\": {", "{}}}, \"mseg_out_break\": {}}}} cls.compete_meas3 = { \"name\": \"sample compete measure c3\", \"climate_zone\": [\"AIA_CZ1\"],", "-0.5), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 2, 0.2009346), numpy.pmt(0.07, 5, 2.040408)])}, \"commercial\": { \"2009\":", "self.ok_out_point_com[3]) def test_metrics_ok_distrib1(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # 
Initialize", "(string): Sample consumer adoption scheme. test_htcl_adj (dict): Sample dict with supply-demand overlap data.", "-200}}}, \"carbon cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "\"efficient\": {\"2009\": 20, \"2010\": 8}}, \"competed\": { \"baseline\": {\"2009\": 5, \"2010\": 8}, \"efficient\":", "2, 0.4909346), numpy.pmt(0.07, 5, 2.265408)])}, \"commercial\": { \"2009\": numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5)}},", "5, 2.837211)]), \"2010\": numpy.array([ numpy.pmt(0.07, 1, -0.255), numpy.pmt(0.07, 1, -0.185), numpy.pmt(0.07, 2, 0.3659346),", "focus of this test suite test_meas = run.Measure(self.handyvars, **self.sample_measure_com) test_meas.markets[self.test_adopt_scheme][\"uncompeted\"][ \"master_mseg\"] = self.ok_master_mseg_point", "-0.09371098, -0.072742925, -0.11206083])}, \"ccc\": { \"2009\": numpy.array([ -1.608851e-08, -1.689124e-08, -1.693885e-08, -1.602415e-08, -1.614253e-08]), \"2010\":", "\"2010\": 10}, \"efficient\": { \"2009\": 5, \"2010\": 5}}, \"competed\": { \"baseline\": { \"2009\":", "10}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 40,", "110, \"rate 7\": 115}, \"2010\": { \"rate 1\": 85, \"rate 2\": 90, \"rate", "4\": -60, \"rate 5\": -65, \"rate 6\": -70, \"rate 7\": -75}}}}, { \"stock", "Second sample string for secondary market microsegment key chain being tested. 
secnd_adj_key (string):", "\"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate 1\": 50,", "\"competed\": { \"baseline\": { \"2009\": 1.29884336, \"2010\": 1.29884336}, \"efficient\": { \"2009\": 0.432947785, \"2010\":", "\"all\": {\"2009\": 20, \"2010\": 20}, \"measure\": {\"2009\": 17.77, \"2010\": 17.77}}, \"competed\": { \"all\":", "\"2010\": 0.5567503}}}, \"carbon\": { \"total\": { \"baseline\": {\"2009\": 3.340502, \"2010\": 3.340502}, \"efficient\": {\"2009\":", "\"2010\": 20}, \"Cooling\": {\"2009\": 25, \"2010\": 25}}}, \"AIA CZ2\": { \"Residential\": { \"Heating\":", "1}, \"measure\": 1}, \"sub-market scaling\": 1}}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)',", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 2.59768671, 0.02713253, 14.40498233]), \"2010\": numpy.array([", "2}} cls.ok_master_mseg_dist3 = { \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 20},", "\"energy\": { \"total\": { \"baseline\": {\"2009\": 20, \"2010\": 20}, \"efficient\": { \"2009\": numpy.array([15,", "\"carbon\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 63.33550, 64.02682, 60.16002]), \"2010\": numpy.array([", "energy (total captured)\": {}, \"adjusted energy (competed and captured)\": {}}}}, \"mseg_out_break\": {}}, \"Max", "use across all class functions.\"\"\" base_dir = os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) #", "adopt_scheme in self.handyvars.adopt_schemes: # Markets self.assertEqual(list(sorted( engine_instance.measures[0].markets[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes) # Savings self.assertEqual(list(sorted( engine_instance.measures[0].savings[adopt_scheme].keys())), self.ok_savings_mkts_comp_schemes)", "\"stock cost\": { \"residential\": {\"2009\": None, \"2010\": None}, \"commercial\": { \"2009\": { \"rate", "0.01808835, 9.60332155])}, \"efficient\": { \"2009\": numpy.array([ 1.29884336, 0.01356626, 
7.20249116]), \"2010\": numpy.array([ 1.29884336, 0.01356626,", "\"stock\": { \"total\": { \"all\": {\"2009\": 30, \"2010\": 30}, \"measure\": {\"2009\": 22.22, \"2010\":", "[\"uncompeted\", \"competed\"]: tested_data = \\ measure_instance.markets[adopt_scheme][comp_scheme] self.assertTrue( all([isinstance(x, y) for x, y in", "1}}, str(('primary', 'AIA_CZ2', 'single family home', 'electricity (grid)', 'lighting', 'reflector (LED)')): { \"stock\":", "Initialize test measure and assign it a sample 'uncompeted' # market ('ok_master_mseg_dist1'), the", "measure object. attribute_dict (dict): Dict of sample measure attributes. \"\"\" @classmethod def setUpClass(cls):", "numpy.pmt(0.07, 2, 0.3845794)]), \"2010\": numpy.array([ numpy.pmt(0.07, 2, 0.4459346), numpy.pmt(0.07, 2, 0.5159346), numpy.pmt(0.07, 2,", "the supply and demand sides of # heating and cooling self.a_run.htcl_adj( self.measures_supply, self.test_adopt_scheme,", "{ \"total\": { \"baseline\": {\"2009\": 10, \"2010\": 15}, \"efficient\": { \"2009\": numpy.array( [15.1,", "costs)\": {\"2009\": numpy.array([ 4.442382, 8.824726, 5.647891, 5.501689, 4.082098]), \"2010\": numpy.array([ 8.446248, 11.795815, 6.327488,", "2.99])}, \"irr (w/ energy and carbon costs)\": {\"2009\": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]),", "22.5])}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 2}} cls.ok_master_mseg_dist2 = {", "10.05334]), \"2010\": numpy.array([ 11.11183, 11.34227, 10.05334])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]), \"2010\":", "6])}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array([6, 5, 3])}}}, \"carbon\": { \"total\": {", "\"2010\": 0}}}}}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": { \"stock\": { \"total\":", "\"2010\": 31.66775}, \"efficient\": {\"2009\": 10.55592, \"2010\": 10.55592}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1},", "\"2010\": numpy.array([15, 16, 17])}}, \"competed\": { \"baseline\": {\"2009\": 10, 
\"2010\": 10}, \"efficient\": {", "status, savings, and portfolio/consumer-level financial metrics that should be generated given 'ok_master_mseg_dist1' with", "10, \"2010\": 20}, \"measure\": {\"2009\": 15, \"2010\": 25}}, \"competed\": { \"all\": {\"2009\": 5,", "\"baseline\": { \"2009\": 34.5, \"2010\": numpy.array([33.0, 33.0, 31.5])}, \"efficient\": { \"2009\": 11.5, \"2010\":", "numpy.pmt(0.07, 2, 1.202332), numpy.pmt(0.07, 2, 1.247533), numpy.pmt(0.07, 2, 1.130011)]) }, \"commercial\": { \"2009\":", "0.02713253, 14.40498233]), \"2010\": numpy.array([ 2.59768671, 0.02713253, 14.40498233])}, \"efficient\": { \"2009\": numpy.array([ 1.73179114, 0.01808835,", "-180, \"rate 6\": -230, \"rate 7\": -200}}}, \"carbon cost\": { \"residential\": { \"2009\":", "adjustments on sample measure self.a_run.secondary_adj( self.measures_secondary, self.overlap_key_scnd, self.secnd_adj_key, self.test_adopt_scheme) # Check updated competed", "\"efficient\": {\"2009\": 1.29884336, \"2010\": 1.29884336}}, \"competed\": { \"baseline\": {\"2009\": 0.865895571, \"2010\": 0.865895571}, \"efficient\":", "(dict): Sample measure master microsegment including stock cost array. ok_master_mseg_dist3 (dict): Sample measure", "{ cls.secnd_adj_key: { \"2009\": 0, \"2010\": 0}}}}, \"supply-demand adjustment\": { \"savings\": {}, \"total\":", "consumer_metrics_final_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 10, \"2010\": 10},", "class. sample_measure (object): Sample measure data with lists to convert. 
\"\"\" @classmethod def", "os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) sample_measure = CommonTestMeasures().sample_measure cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)] cls.ok_cashflows", "the supply and demand sides of # heating and cooling self.a_run_dist.htcl_adj( self.measures_demand_dist, self.test_adopt_scheme,", "\"2010\": numpy.array([6.0, 6.5, 8.0])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\":", "in measure markets attribute for adopt_scheme in self.handyvars.adopt_schemes: for comp_scheme in [\"uncompeted\", \"competed\"]:", "1\": numpy.pmt(10.0, 2, 0.04958678), \"rate 2\": numpy.pmt(1.0, 2, 0.375), \"rate 3\": numpy.pmt(0.45, 2,", "\"2009\": -50, \"2010\": -50}, \"commercial\": { \"2009\": None, \"2010\": None}}}, { \"stock cost\":", "{ \"market share\": { \"original energy (total captured)\": { cls.secnd_adj_key: {\"2009\": 0, \"2010\":", "\"2009\": { \"rate 1\": -190, \"rate 2\": -195, \"rate 3\": -190, \"rate 4\":", "given as a tuple to be of comparable structure # to the normal", "1\": numpy.pmt(10.0, 2, 0.09917355), \"rate 2\": numpy.pmt(1.0, 2, 0.75), \"rate 3\": numpy.pmt(0.45, 2,", "cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2] cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5] cls.measures_overlap1_dist = { \"measures\": cls.measures_all_dist[2:5], \"keys\": [[str(('primary',", "0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}}, \"secondary mseg adjustments\": { \"market share\":", "{ \"2009\": 34, \"2010\": numpy.array([24, 26, 32])}, \"efficient\": { \"2009\": 25.5, \"2010\": numpy.array([18,", "33}, \"efficient\": {\"2009\": 11.5, \"2010\": 11}}}}, \"lifetime\": {\"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\":", "{ \"market_entry_year\": None, \"market_exit_year\": None, \"markets\": { \"Technical potential\": { \"key 1\": {", "30}, \"efficient\": { \"2009\": numpy.array([20, 21, 22]), \"2010\": numpy.array([20, 21, 22])}}, \"competed\": {", 
"6.943250, 5.057443, 7.495183])}}}, \"cost\": { \"stock\": { \"total\": { \"baseline\": { \"2009\": numpy.array([", "\"full service\", \"structure_type\": [\"new\", \"existing\"], \"climate_zone\": [\"AIA_CZ1\", \"AIA_CZ2\"], \"bldg_type\": [\"assembly\"], \"fuel_type\": {\"primary\": [\"electricity\"],", "21])}}, \"competed\": { \"all\": {\"2009\": 15, \"2010\": 15}, \"measure\": { \"2009\": 11.5, \"2010\":", "31.66775, 32.01341, 30.08001]), \"2010\": numpy.array([ 31.66775, 32.01341, 30.08001])}}, \"competed\": { \"baseline\": { \"2009\":", "\"supply\", \"secondary\": None}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": None},", "routine on sample measures self.a_run.compete_com_primary( self.measures_all, self.overlap_key, self.test_adopt_scheme) # Run secondary microsegment adjustments", "None}, \"technology\": {\"primary\": [\"resistance heat\", \"ASHP\", \"GSHP\", \"room AC\"], \"secondary\": None}, \"markets\": {", "\"2010\": 0}}}}}, \"mseg_out_break\": {}}}} cls.measures_all = [run.Measure( cls.handyvars, **x) for x in [", "cls.measures_overlap1 = { \"measures\": cls.measures_all[2:5], \"keys\": [[str(('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',", "\"baseline\": { \"2009\": numpy.array([ 13.02227, 13.64868, 10.14500]), \"2010\": numpy.array([ 13.02227, 13.64868, 10.14500])}, \"efficient\":", "None}}, \"energy cost\": { \"residential\": { \"2009\": -150, \"2010\": -150}, \"commercial\": { \"2009\":", "None}, \"technology\": [\"ASHP\"], \"technology_type\": {\"primary\": \"demand\", \"secondary\": None}, \"market_entry_year\": 2009, \"market_exit_year\": None, \"yrs_on_mkt\":", "numpy.array( [20, 21, 22]), \"2010\": numpy.array( [20, 21, 22])}}, \"competed\": { \"baseline\": {", "\"payback (w/ energy and carbon costs)\": { \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_dist1 =", "{ \"2009\": -200, \"2010\": -200}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\":", 
"(list): Demand-side subset of 'measures_all'. measures_supply (list): Supply-side subset of 'measures_all'. measures_overlap1 (dict):", "adjustments for ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test", "\"2010\": numpy.array([ 1.29884336, 0.01356626, 7.20249116])}}, \"competed\": { \"baseline\": { \"2009\": numpy.array([ 0.865895571, 0.009044176,", "os.getcwd() cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles()) cls.handyvars.aeo_years = [\"2009\", \"2010\"] cls.handyvars.retro_rate = 0 cls.test_adopt_scheme", "\"rate 4\": numpy.pmt(0.25, 2, 1.08), \"rate 5\": numpy.pmt(0.15, 2, 1.219282), \"rate 6\": numpy.pmt(0.065,", "[\"electricity\"], \"secondary\": None}, \"fuel_switch_to\": None, \"end_use\": {\"primary\": [\"heating\", \"cooling\"], \"secondary\": None}, \"technology_type\": {\"primary\":", "are found in i and i2, # respectively, at the current level of", "{\"2009\": 0, \"2010\": 0}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 60, \"2010\": 60},", "common sample measures for tests. Attributes: sample_measure (dict): Sample residential measure #1. 
sample_measure2", "(w/ energy and carbon costs)\": {\"2009\": numpy.array([ 1.941176, 4.555556, 5.647891, 5.501689, 4.543007]), \"2010\":", "{ \"baseline\": { \"2009\": 15, \"2010\": 15}, \"efficient\": { \"2009\": 5, \"2010\": 5}}}},", "keys from input dict.\"\"\" for key in self.sample_measure.keys(): self.assertEqual( self.attribute_dict[key], self.sample_measure[key]) class OutputBreakoutDictWalkTest(unittest.TestCase,", "{ \"2009\": 5, \"2010\": 5}, \"efficient\": { \"2009\": 0, \"2010\": numpy.array( [0, 1,", "ind, d in enumerate(self.a_run.measures): self.dict_check( self.measures_master_msegs_out[ind], self.a_run.measures[ind].markets[self.test_adopt_scheme][ \"competed\"][\"master_mseg\"]) def test_compete_com_dist(self): \"\"\"Test outcomes given", "\"2010\": 8.89}}}, \"energy\": { \"total\": { \"baseline\": {\"2009\": 27.77300, \"2010\": 27.77300}, \"efficient\": {\"2009\":", ".20, \"2010\": .20}, \"Cooling\": {\"2009\": .25, \"2010\": .25}}}, \"AIA CZ2\": { \"Residential\": {", "\"\"\"Test 'compete_com_primary' and 'secondary_adj' functions. 
Verify that 'compete_com_primary' correctly calculates primary market shares", "{\"2009\": 60, \"2010\": 60}, \"efficient\": {\"2009\": 60, \"2010\": 40}}, \"competed\": { \"baseline\": {\"2009\":", "30}, \"measure\": { \"2009\": numpy.array([22.22, 22.68, 20.11]), \"2010\": numpy.array([22.22, 22.68, 20.11])}}, \"competed\": {", "0.1, 0.1, 0.1, 0.1, 0.1, 0.4], \"2010\": [ 0.1, 0.1, 0.1, 0.1, 0.1,", "numpy.array([ -8.904701e-08, -9.630094e-08, -1.036196e-07, -7.469082e-08, -6.651191e-08]), \"2010\": numpy.array([ -8.587114e-08, -9.682543e-08, -7.964446e-08, -8.216772e-08, -7.592937e-08])}},", "self.test_adopt_scheme) # Check updated competed master microsegments for each sample measure # following", "**self.sample_measure) # Test for correct data types in measure markets attribute for adopt_scheme", "\"2010\": numpy.array([0, 0, 0])}}}, \"energy\": { \"total\": { \"baseline\": { \"2009\": numpy.array([ 26.04455,", "58.1, 50, 51.1]), \"2010\": numpy.array( [100.6, 108.7, 105.1, 105, 106.1])}}}, \"cost\": { \"stock\":", "Instantiate measure measure_instance = run.Measure(self.handyvars, **self.sample_measure) # Test for correct data types in", "object in 'measures_all' following competition and supply-demand overlap adjustments. 
measure_master_msegs_out_dist (dict): Master market", "{ \"total\": { \"baseline\": {\"2009\": 30, \"2010\": 30}, \"efficient\": { \"2009\": numpy.array([20, 21,", "}, \"mseg_out_break\": {}}, \"Max adoption potential\": { \"master_mseg\": {}, \"mseg_adjust\": { \"contributing mseg", "-100])}, \"commercial\": { \"2009\": None, \"2010\": None}}, \"carbon cost\": { \"residential\": { \"2009\":", "160}}}, \"energy cost\": { \"residential\": { \"2009\": None, \"2010\": None}, \"commercial\": { \"2009\":", "test_metrics_ok_distrib1(self): \"\"\"Test output given residential measure with array inputs.\"\"\" # Initialize test measure", "16, 17])}}, \"competed\": { \"baseline\": { \"2009\": 10, \"2010\": 10}, \"efficient\": { \"2009\":", "competition and supply-demand overlap adjustments. measure_master_msegs_out_dist (dict): Master market microsegments that should be", "{ \"2009\": 0.2, \"2010\": 0.22}}] cls.ok_out_dist1 = [{ \"savings and portfolio metrics\": {", "4.885113, 0.009633673]), \"2010\": numpy.array([ 1.113501, 4.885113, 0.009633673])}, \"efficient\": { \"2009\": numpy.array([0, 0, 0]),", "numpy.repeat(None, 5), \"2010\": numpy.repeat(None, 5) }}}, \"irr (w/ energy costs)\": { \"2009\": numpy.array([", "inputs. measures_secondary (list): Subset of 'measures_all' with secondary microsegments to adjust. a_run (object):", "100, \"rate 2\": 110, \"rate 3\": 120, \"rate 4\": 130, \"rate 5\": 140,", "5, \"2010\": 5}}}}, \"lifetime\": { \"baseline\": {\"2009\": 1, \"2010\": 1}, \"measure\": 1}, \"sub-market", "= consumer_metrics_dist[ind] cls.measures_master_msegs_out = [{ \"stock\": { \"total\": { \"all\": {\"2009\": 20, \"2010\":", "5.057443, 7.495183]), \"2010\": numpy.array([ 6.943250, 5.057443, 7.495183])}}}, \"carbon\": { \"total\": { \"baseline\": {" ]
[]
[ "operation_id=None): transformed = [] for record in response[self.response_key]: for activity in record.get('activity', []):", "BaseStream import singer from datetime import datetime from dateutil.parser import parse LOGGER =", "} response = self.client.make_request(path='/campaigns', method='GET', params=campaign_params) total_campaigns = response['total_items'] data = response['campaigns'] campaign_ids", "method='GET', params=campaign_params) total_campaigns = response['total_items'] data = response['campaigns'] campaign_ids += list(map(lambda x: x['id'],", "data = response['campaigns'] campaign_ids += list(map(lambda x: x['id'], data)) offset += count LOGGER.info('Number", "'_links,emails._links' } } ) self.batch_sync_data(operations) def get_stream_data(self, response, operation_id=None): transformed = [] for", "offset < total_campaigns: campaign_params = { \"count\": count, \"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\":", "} ) self.batch_sync_data(operations) def get_stream_data(self, response, operation_id=None): transformed = [] for record in", "of campaigns: {}'.format(len(campaign_ids))) operations = [] for campaign_id in campaign_ids: operations.append( { 'method':", "record in response[self.response_key]: for activity in record.get('activity', []): new_activity = dict(record) del new_activity['activity']", "\"GET\" TABLE = \"reports_email_activity\" response_key = \"emails\" def sync_data(self): LOGGER.info(\"Syncing data for {}\".format(self.TABLE))", "[] while offset < total_campaigns: campaign_params = { \"count\": count, \"offset\": offset, \"since_send_time\":", "= 1000 offset = 0 campaign_ids = [] while offset < total_campaigns: campaign_params", "1000 offset = 0 campaign_ids = [] while offset < total_campaigns: campaign_params =", "+= count LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids))) operations = [] for campaign_id in campaign_ids:", 
"self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' } } ) self.batch_sync_data(operations) def get_stream_data(self, response, operation_id=None): transformed =", "key, value in activity.items(): new_activity[key] = value new_activity = self.transform_record(new_activity) new_activity['report_date'] = datetime.now().strftime(\"%Y-%m-%d", "campaign_id, 'params': { 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' } } ) self.batch_sync_data(operations) def get_stream_data(self,", "offset = 0 campaign_ids = [] while offset < total_campaigns: campaign_params = {", "< total_campaigns: campaign_params = { \"count\": count, \"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\",", "\"ASC\" } response = self.client.make_request(path='/campaigns', method='GET', params=campaign_params) total_campaigns = response['total_items'] data = response['campaigns']", "value in activity.items(): new_activity[key] = value new_activity = self.transform_record(new_activity) new_activity['report_date'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "response[self.response_key]: for activity in record.get('activity', []): new_activity = dict(record) del new_activity['activity'] for key,", "for record in response[self.response_key]: for activity in record.get('activity', []): new_activity = dict(record) del", "in record.get('activity', []): new_activity = dict(record) del new_activity['activity'] for key, value in activity.items():", "<gh_stars>0 from tap_mailchimp.streams.base import BaseStream import singer from datetime import datetime from dateutil.parser", "import parse LOGGER = singer.get_logger() class ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\" TABLE = \"reports_email_activity\"", "ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\" TABLE = \"reports_email_activity\" response_key = \"emails\" def 
sync_data(self): LOGGER.info(\"Syncing", "'exclude_fields': '_links,emails._links' } } ) self.batch_sync_data(operations) def get_stream_data(self, response, operation_id=None): transformed = []", "= \"GET\" TABLE = \"reports_email_activity\" response_key = \"emails\" def sync_data(self): LOGGER.info(\"Syncing data for", "self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id, 'params': { 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' } }", "{}\".format(self.TABLE)) total_campaigns = 100 count = 1000 offset = 0 campaign_ids = []", "self.client.make_request(path='/campaigns', method='GET', params=campaign_params) total_campaigns = response['total_items'] data = response['campaigns'] campaign_ids += list(map(lambda x:", "\"sort_dir\": \"ASC\" } response = self.client.make_request(path='/campaigns', method='GET', params=campaign_params) total_campaigns = response['total_items'] data =", "'/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id, 'params': { 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' } } ) self.batch_sync_data(operations)", "while offset < total_campaigns: campaign_params = { \"count\": count, \"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(),", "\"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\": \"ASC\" } response = self.client.make_request(path='/campaigns', method='GET',", "total_campaigns = response['total_items'] data = response['campaigns'] campaign_ids += list(map(lambda x: x['id'], data)) offset", "\"sort_field\": \"send_time\", \"sort_dir\": \"ASC\" } response = self.client.make_request(path='/campaigns', method='GET', params=campaign_params) total_campaigns = response['total_items']", "response_key = \"emails\" def sync_data(self): 
LOGGER.info(\"Syncing data for {}\".format(self.TABLE)) total_campaigns = 100 count", "'params': { 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' } } ) self.batch_sync_data(operations) def get_stream_data(self, response,", "= response['total_items'] data = response['campaigns'] campaign_ids += list(map(lambda x: x['id'], data)) offset +=", "response['total_items'] data = response['campaigns'] campaign_ids += list(map(lambda x: x['id'], data)) offset += count", "in activity.items(): new_activity[key] = value new_activity = self.transform_record(new_activity) new_activity['report_date'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") transformed.append(new_activity)", "import BaseStream import singer from datetime import datetime from dateutil.parser import parse LOGGER", "count LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids))) operations = [] for campaign_id in campaign_ids: operations.append(", "import singer from datetime import datetime from dateutil.parser import parse LOGGER = singer.get_logger()", "self.batch_sync_data(operations) def get_stream_data(self, response, operation_id=None): transformed = [] for record in response[self.response_key]: for", "tap_mailchimp.streams.base import BaseStream import singer from datetime import datetime from dateutil.parser import parse", "class ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\" TABLE = \"reports_email_activity\" response_key = \"emails\" def sync_data(self):", "{ 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' } } ) self.batch_sync_data(operations) def get_stream_data(self, response, operation_id=None):", "def get_stream_data(self, response, operation_id=None): transformed = [] for record in response[self.response_key]: for activity", "for campaign_id in campaign_ids: operations.append( { 'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': 
campaign_id, 'params':", "new_activity['activity'] for key, value in activity.items(): new_activity[key] = value new_activity = self.transform_record(new_activity) new_activity['report_date']", "0 campaign_ids = [] while offset < total_campaigns: campaign_params = { \"count\": count,", "from tap_mailchimp.streams.base import BaseStream import singer from datetime import datetime from dateutil.parser import", "= response['campaigns'] campaign_ids += list(map(lambda x: x['id'], data)) offset += count LOGGER.info('Number of", "count = 1000 offset = 0 campaign_ids = [] while offset < total_campaigns:", "datetime import datetime from dateutil.parser import parse LOGGER = singer.get_logger() class ReportsEmailActivityStream(BaseStream): API_METHOD", "LOGGER.info(\"Syncing data for {}\".format(self.TABLE)) total_campaigns = 100 count = 1000 offset = 0", "100 count = 1000 offset = 0 campaign_ids = [] while offset <", "\"reports_email_activity\" response_key = \"emails\" def sync_data(self): LOGGER.info(\"Syncing data for {}\".format(self.TABLE)) total_campaigns = 100", "= [] for campaign_id in campaign_ids: operations.append( { 'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id':", "= \"emails\" def sync_data(self): LOGGER.info(\"Syncing data for {}\".format(self.TABLE)) total_campaigns = 100 count =", "sync_data(self): LOGGER.info(\"Syncing data for {}\".format(self.TABLE)) total_campaigns = 100 count = 1000 offset =", "= 100 count = 1000 offset = 0 campaign_ids = [] while offset", "'operation_id': campaign_id, 'params': { 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' } } ) self.batch_sync_data(operations) def", "total_campaigns: campaign_params = { \"count\": count, \"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\":", "campaign_ids = [] while offset < total_campaigns: campaign_params = { 
\"count\": count, \"offset\":", "in campaign_ids: operations.append( { 'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id, 'params': { 'since':", "import datetime from dateutil.parser import parse LOGGER = singer.get_logger() class ReportsEmailActivityStream(BaseStream): API_METHOD =", "for {}\".format(self.TABLE)) total_campaigns = 100 count = 1000 offset = 0 campaign_ids =", "campaign_ids += list(map(lambda x: x['id'], data)) offset += count LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids)))", "LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids))) operations = [] for campaign_id in campaign_ids: operations.append( {", "campaigns: {}'.format(len(campaign_ids))) operations = [] for campaign_id in campaign_ids: operations.append( { 'method': self.API_METHOD,", "{ \"count\": count, \"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\": \"ASC\" } response", "list(map(lambda x: x['id'], data)) offset += count LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids))) operations =", "parse LOGGER = singer.get_logger() class ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\" TABLE = \"reports_email_activity\" response_key", "= [] while offset < total_campaigns: campaign_params = { \"count\": count, \"offset\": offset,", "\"count\": count, \"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\": \"ASC\" } response =", "API_METHOD = \"GET\" TABLE = \"reports_email_activity\" response_key = \"emails\" def sync_data(self): LOGGER.info(\"Syncing data", "'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' } } ) self.batch_sync_data(operations) def get_stream_data(self, response, operation_id=None): transformed", "= singer.get_logger() class 
ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\" TABLE = \"reports_email_activity\" response_key = \"emails\"", "= \"reports_email_activity\" response_key = \"emails\" def sync_data(self): LOGGER.info(\"Syncing data for {}\".format(self.TABLE)) total_campaigns =", "\"emails\" def sync_data(self): LOGGER.info(\"Syncing data for {}\".format(self.TABLE)) total_campaigns = 100 count = 1000", "x: x['id'], data)) offset += count LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids))) operations = []", "in response[self.response_key]: for activity in record.get('activity', []): new_activity = dict(record) del new_activity['activity'] for", "dict(record) del new_activity['activity'] for key, value in activity.items(): new_activity[key] = value new_activity =", "+= list(map(lambda x: x['id'], data)) offset += count LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids))) operations", "TABLE = \"reports_email_activity\" response_key = \"emails\" def sync_data(self): LOGGER.info(\"Syncing data for {}\".format(self.TABLE)) total_campaigns", "data for {}\".format(self.TABLE)) total_campaigns = 100 count = 1000 offset = 0 campaign_ids", "[] for campaign_id in campaign_ids: operations.append( { 'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id,", "response['campaigns'] campaign_ids += list(map(lambda x: x['id'], data)) offset += count LOGGER.info('Number of campaigns:", "data)) offset += count LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids))) operations = [] for campaign_id", "singer from datetime import datetime from dateutil.parser import parse LOGGER = singer.get_logger() class", "activity in record.get('activity', []): new_activity = dict(record) del new_activity['activity'] for key, value in", "= 0 campaign_ids = [] while offset < total_campaigns: campaign_params = { \"count\":", "total_campaigns = 100 count = 1000 offset = 0 campaign_ids = [] while", "LOGGER = 
singer.get_logger() class ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\" TABLE = \"reports_email_activity\" response_key =", "def sync_data(self): LOGGER.info(\"Syncing data for {}\".format(self.TABLE)) total_campaigns = 100 count = 1000 offset", "} } ) self.batch_sync_data(operations) def get_stream_data(self, response, operation_id=None): transformed = [] for record", ") self.batch_sync_data(operations) def get_stream_data(self, response, operation_id=None): transformed = [] for record in response[self.response_key]:", "= self.client.make_request(path='/campaigns', method='GET', params=campaign_params) total_campaigns = response['total_items'] data = response['campaigns'] campaign_ids += list(map(lambda", "response, operation_id=None): transformed = [] for record in response[self.response_key]: for activity in record.get('activity',", "del new_activity['activity'] for key, value in activity.items(): new_activity[key] = value new_activity = self.transform_record(new_activity)", "params=campaign_params) total_campaigns = response['total_items'] data = response['campaigns'] campaign_ids += list(map(lambda x: x['id'], data))", "get_stream_data(self, response, operation_id=None): transformed = [] for record in response[self.response_key]: for activity in", "campaign_id in campaign_ids: operations.append( { 'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id, 'params': {", "count, \"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\": \"ASC\" } response = self.client.make_request(path='/campaigns',", "offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\": \"ASC\" } response = self.client.make_request(path='/campaigns', method='GET', params=campaign_params)", "response = self.client.make_request(path='/campaigns', method='GET', 
params=campaign_params) total_campaigns = response['total_items'] data = response['campaigns'] campaign_ids +=", "= { \"count\": count, \"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\": \"ASC\" }", "\"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\": \"ASC\" } response = self.client.make_request(path='/campaigns', method='GET', params=campaign_params) total_campaigns", "'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id, 'params': { 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' } } )", "= [] for record in response[self.response_key]: for activity in record.get('activity', []): new_activity =", "datetime from dateutil.parser import parse LOGGER = singer.get_logger() class ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\"", "from datetime import datetime from dateutil.parser import parse LOGGER = singer.get_logger() class ReportsEmailActivityStream(BaseStream):", "campaign_params = { \"count\": count, \"offset\": offset, \"since_send_time\": (parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\": \"ASC\"", "record.get('activity', []): new_activity = dict(record) del new_activity['activity'] for key, value in activity.items(): new_activity[key]", "(parse(self.config.get('start_date'))).isoformat(), \"sort_field\": \"send_time\", \"sort_dir\": \"ASC\" } response = self.client.make_request(path='/campaigns', method='GET', params=campaign_params) total_campaigns =", "new_activity = dict(record) del new_activity['activity'] for key, value in activity.items(): new_activity[key] = value", "activity.items(): new_activity[key] = value new_activity = self.transform_record(new_activity) new_activity['report_date'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") 
transformed.append(new_activity) return", "operations.append( { 'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id, 'params': { 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields':", "x['id'], data)) offset += count LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids))) operations = [] for", "{ 'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id, 'params': { 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links'", "= dict(record) del new_activity['activity'] for key, value in activity.items(): new_activity[key] = value new_activity", "offset += count LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids))) operations = [] for campaign_id in", "[]): new_activity = dict(record) del new_activity['activity'] for key, value in activity.items(): new_activity[key] =", "new_activity[key] = value new_activity = self.transform_record(new_activity) new_activity['report_date'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") transformed.append(new_activity) return transformed", "'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id, 'params': { 'since': self.get_start_date(self.TABLE).isoformat(), 'exclude_fields': '_links,emails._links' }", "operations = [] for campaign_id in campaign_ids: operations.append( { 'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id),", "for activity in record.get('activity', []): new_activity = dict(record) del new_activity['activity'] for key, value", "singer.get_logger() class ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\" TABLE = \"reports_email_activity\" response_key = \"emails\" def", "\"send_time\", \"sort_dir\": \"ASC\" } response = self.client.make_request(path='/campaigns', method='GET', params=campaign_params) total_campaigns = 
response['total_items'] data", "campaign_ids: operations.append( { 'method': self.API_METHOD, 'path': '/reports/{}/email-activity'.format(campaign_id), 'operation_id': campaign_id, 'params': { 'since': self.get_start_date(self.TABLE).isoformat(),", "for key, value in activity.items(): new_activity[key] = value new_activity = self.transform_record(new_activity) new_activity['report_date'] =", "transformed = [] for record in response[self.response_key]: for activity in record.get('activity', []): new_activity", "[] for record in response[self.response_key]: for activity in record.get('activity', []): new_activity = dict(record)", "from dateutil.parser import parse LOGGER = singer.get_logger() class ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\" TABLE", "{}'.format(len(campaign_ids))) operations = [] for campaign_id in campaign_ids: operations.append( { 'method': self.API_METHOD, 'path':", "dateutil.parser import parse LOGGER = singer.get_logger() class ReportsEmailActivityStream(BaseStream): API_METHOD = \"GET\" TABLE =" ]
[ ") result = abs_result or rel_result except Exception: pass if result is NotImplemented", "math.isclose and cmath.isclose.\"\"\" import cmath import logging import math import numbers LOG =", "TypeError(f\"cannot compare {a!r} and {b!r}\") return result isclose.default_rel_tol = 1e-9 isclose.default_abs_tol = 0.0", "close function.\"\"\" return lambda a, b: a < b or self(a, b) def", "a < b and not self(a, b) def less_than_or_close(self): \"\"\"less or close function.\"\"\"", "except Exception: pass if result is NotImplemented: rel_tol = kwargs.get(\"rel_tol\", None) abs_tol =", "(\"version\", \"isclose\", \"IsClose\") version = _version.Version(\"1.1.0\") def isclose(a, b, **kwargs) -> bool: \"\"\"polymorphic,", "True >>> isclose(0.0, 1.0) False >>> isclose(1.0j, 1.0j) True >>> isclose(-1.0j, 1.0j) False", "return result isclose.default_rel_tol = 1e-9 isclose.default_abs_tol = 0.0 class IsClose: \"\"\"Allows pre-defined closeness", "and not kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot compare {a!r} and {b!r}\") return result isclose.default_rel_tol", "if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol),", "isclose(a, b, **kwargs) -> bool: \"\"\"polymorphic, parameterized isclose. 
>>> isclose(1.0, 1.0) True >>>", "abs_tol is not None and difference <= abs_tol rel_result = rel_tol is not", "much_greater_than(self): \"\"\"definitely greater function.\"\"\" return lambda a, b: a > b and not", "import numbers LOG = logging.getLogger(\"isclose\") try: import version as _version if not _version.version.is_backwards_compatible_with(\"1.0.0\"):", "True >>> isclose(-1.0j, 1.0j) False \"\"\" type_a = type(a) type_b = type(b) if", "isinstance(b, numbers.Real): result = math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol if rel_tol is None else", "rel_tol = kwargs.get(\"rel_tol\", None) abs_tol = kwargs.get(\"abs_tol\", None) try: if isinstance(a, numbers.Real) and", "pre-defined closeness on polymorphic isclose.\"\"\" def __init__(self, **kwargs) -> None: self._kwargs = kwargs", "self(a, b) def much_greater_than(self): \"\"\"definitely greater function.\"\"\" return lambda a, b: a >", "\"\"\"greater or close function.\"\"\" return lambda a, b: a > b or self(a,", "bool: \"\"\"Apply IsClose(). >>> myisclose = IsClose() >>> myisclose(1.0, 1.0) True \"\"\" return", "lambda a, b: a > b or self(a, b) if __name__ == \"__main__\":", "result = math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol", "cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol", "not self(a, b) def greater_than_or_close(self): \"\"\"greater or close function.\"\"\" return lambda a, b:", "not kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot compare {a!r} and {b!r}\") return result isclose.default_rel_tol =", "= True else: difference = abs(a - b) abs_result = abs_tol is not", "kwargs @property def kwargs(self): return self._kwargs def __call__(self, a, b) -> bool: \"\"\"Apply", "function. 
>>> myisclose = IsClose() >>> callable(myisclose.close) True \"\"\" return self def notclose(self):", "\"\"\" type_a = type(a) type_b = type(b) if type_a != type_b and issubclass(type_b,", "= _version.Version(\"1.1.0\") def isclose(a, b, **kwargs) -> bool: \"\"\"polymorphic, parameterized isclose. >>> isclose(1.0,", "isclose.\"\"\" def __init__(self, **kwargs) -> None: self._kwargs = kwargs @property def kwargs(self): return", "False \"\"\" type_a = type(a) type_b = type(b) if type_a != type_b and", "result is NotImplemented: rel_tol = kwargs.get(\"rel_tol\", None) abs_tol = kwargs.get(\"abs_tol\", None) try: if", "x.isclose(y, **kwargs) except Exception: pass if result is NotImplemented: try: result = y.isclose(x,", "issubclass(type_b, type_a): x, y = b, a else: x, y = a, b", "#!/usr/bin/env python3 # Copyright 2021 <NAME> \"\"\"Extension to math.isclose and cmath.isclose.\"\"\" import cmath", "cmath import logging import math import numbers LOG = logging.getLogger(\"isclose\") try: import version", "def greater_than_or_close(self): \"\"\"greater or close function.\"\"\" return lambda a, b: a > b", "b: result = True else: difference = abs(a - b) abs_result = abs_tol", "a < b or self(a, b) def much_greater_than(self): \"\"\"definitely greater function.\"\"\" return lambda", "a, b: a > b or self(a, b) if __name__ == \"__main__\": import", "self._kwargs = kwargs @property def kwargs(self): return self._kwargs def __call__(self, a, b) ->", "== b: result = True else: difference = abs(a - b) abs_result =", "bool: \"\"\"polymorphic, parameterized isclose. >>> isclose(1.0, 1.0) True >>> isclose(0.0, 1.0) False >>>", "\"\"\" return self def notclose(self): \"\"\"not close function. 
>>> myisclose = IsClose() >>>", "isclose(0.0, 1.0) False >>> isclose(1.0j, 1.0j) True >>> isclose(-1.0j, 1.0j) False \"\"\" type_a", "not self(a, b) def less_than_or_close(self): \"\"\"less or close function.\"\"\" return lambda a, b:", "abs(a - b) abs_result = abs_tol is not None and difference <= abs_tol", "try: import version as _version if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except ImportError: _version", "and isinstance(b, numbers.Complex): result = cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol if rel_tol is None", "math import numbers LOG = logging.getLogger(\"isclose\") try: import version as _version if not", "to math.isclose and cmath.isclose.\"\"\" import cmath import logging import math import numbers LOG", "NotImplemented and not kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot compare {a!r} and {b!r}\") return result", "myisclose = IsClose() >>> callable(myisclose.close) True \"\"\" return self def notclose(self): \"\"\"not close", "greater_than_or_close(self): \"\"\"greater or close function.\"\"\" return lambda a, b: a > b or", "IsClose() >>> callable(myisclose.close) True \"\"\" return self def notclose(self): \"\"\"not close function. 
>>>", "self(a, b) def much_less_than(self): \"\"\"definitely less function.\"\"\" return lambda a, b: a <", "function.\"\"\" return lambda a, b: a > b and not self(a, b) def", "= (\"version\", \"isclose\", \"IsClose\") version = _version.Version(\"1.1.0\") def isclose(a, b, **kwargs) -> bool:", "python3 # Copyright 2021 <NAME> \"\"\"Extension to math.isclose and cmath.isclose.\"\"\" import cmath import", ">>> isclose(0.0, 1.0) False >>> isclose(1.0j, 1.0j) True >>> isclose(-1.0j, 1.0j) False \"\"\"", "and difference <= abs_tol rel_result = rel_tol is not None and difference <=", "else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), ) elif isinstance(a, numbers.Complex)", "result = y.isclose(x, **kwargs) except Exception: pass if result is NotImplemented: rel_tol =", "_version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except ImportError: _version = type(\"_version\", (object,), {\"Version\": lambda self, s:", "1.0) False >>> isclose(1.0j, 1.0j) True >>> isclose(-1.0j, 1.0j) False \"\"\" type_a =", "type(a) type_b = type(b) if type_a != type_b and issubclass(type_b, type_a): x, y", "b: a > b and not self(a, b) def greater_than_or_close(self): \"\"\"greater or close", ">>> callable(myisclose.close) True \"\"\" return self def notclose(self): \"\"\"not close function. >>> myisclose", "isclose(a, b, **self._kwargs) def close(self): \"\"\"close function. 
>>> myisclose = IsClose() >>> callable(myisclose.close)", "else: difference = abs(a - b) abs_result = abs_tol is not None and", "-> None: self._kwargs = kwargs @property def kwargs(self): return self._kwargs def __call__(self, a,", "b or a == b: result = True else: difference = abs(a -", "return lambda a, b: a > b or self(a, b) if __name__ ==", "isinstance(a, numbers.Real) and isinstance(b, numbers.Real): result = math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol if rel_tol", "and difference <= rel_tol * max( abs(a), abs(b) ) result = abs_result or", "and cmath.isclose.\"\"\" import cmath import logging import math import numbers LOG = logging.getLogger(\"isclose\")", "!= type_b and issubclass(type_b, type_a): x, y = b, a else: x, y", "numbers.Real) and isinstance(b, numbers.Real): result = math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol if rel_tol is", "b: a < b or self(a, b) def much_greater_than(self): \"\"\"definitely greater function.\"\"\" return", "abs(b) ) result = abs_result or rel_result except Exception: pass if result is", "abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), ) elif isinstance(a, numbers.Complex) and isinstance(b,", "= IsClose() >>> callable(myisclose.close) True \"\"\" return self def notclose(self): \"\"\"not close function.", "and not self(a, b) def less_than_or_close(self): \"\"\"less or close function.\"\"\" return lambda a,", "def __init__(self, **kwargs) -> None: self._kwargs = kwargs @property def kwargs(self): return self._kwargs", "type_a != type_b and issubclass(type_b, type_a): x, y = b, a else: x,", "lambda a, b: not self(a, b) def much_less_than(self): \"\"\"definitely less function.\"\"\" return lambda", "def notclose(self): \"\"\"not close function. 
>>> myisclose = IsClose() >>> callable(myisclose.notclose) True \"\"\"", "= y.isclose(x, **kwargs) except Exception: pass if result is NotImplemented: rel_tol = kwargs.get(\"rel_tol\",", "None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), ) elif a", "is NotImplemented: try: result = y.isclose(x, **kwargs) except Exception: pass if result is", "cmath.isclose.\"\"\" import cmath import logging import math import numbers LOG = logging.getLogger(\"isclose\") try:", "float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), ) elif a is b", "or close function.\"\"\" return lambda a, b: a > b or self(a, b)", "\"\"\"definitely less function.\"\"\" return lambda a, b: a < b and not self(a,", "def much_less_than(self): \"\"\"definitely less function.\"\"\" return lambda a, b: a < b and", "lambda self, s: s})() __all__ = (\"version\", \"isclose\", \"IsClose\") version = _version.Version(\"1.1.0\") def", "y = a, b result = NotImplemented try: result = x.isclose(y, **kwargs) except", "if result is NotImplemented: rel_tol = kwargs.get(\"rel_tol\", None) abs_tol = kwargs.get(\"abs_tol\", None) try:", "IsClose() >>> myisclose(1.0, 1.0) True \"\"\" return isclose(a, b, **self._kwargs) def close(self): \"\"\"close", "kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot compare {a!r} and {b!r}\") return result isclose.default_rel_tol = 1e-9", "math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol", "and issubclass(type_b, type_a): x, y = b, a else: x, y = a,", "result is NotImplemented: try: result = y.isclose(x, **kwargs) except Exception: pass if result", "= kwargs.get(\"abs_tol\", None) try: if isinstance(a, numbers.Real) and isinstance(b, numbers.Real): result = math.isclose(", "- b) abs_result = abs_tol is not None and difference <= abs_tol rel_result", "**kwargs) -> bool: \"\"\"polymorphic, 
parameterized isclose. >>> isclose(1.0, 1.0) True >>> isclose(0.0, 1.0)", "1e-9 isclose.default_abs_tol = 0.0 class IsClose: \"\"\"Allows pre-defined closeness on polymorphic isclose.\"\"\" def", "a is b or a == b: result = True else: difference =", "close function. >>> myisclose = IsClose() >>> callable(myisclose.notclose) True \"\"\" return lambda a,", "lambda a, b: a > b and not self(a, b) def greater_than_or_close(self): \"\"\"greater", "isclose(1.0j, 1.0j) True >>> isclose(-1.0j, 1.0j) False \"\"\" type_a = type(a) type_b =", "numbers LOG = logging.getLogger(\"isclose\") try: import version as _version if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise", "pass if result is NotImplemented: try: result = y.isclose(x, **kwargs) except Exception: pass", "compare {a!r} and {b!r}\") return result isclose.default_rel_tol = 1e-9 isclose.default_abs_tol = 0.0 class", "a > b or self(a, b) if __name__ == \"__main__\": import doctest doctest.testmod()", "not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except ImportError: _version = type(\"_version\", (object,), {\"Version\": lambda self,", "type(b) if type_a != type_b and issubclass(type_b, type_a): x, y = b, a", "b, **self._kwargs) def close(self): \"\"\"close function. >>> myisclose = IsClose() >>> callable(myisclose.close) True", "def isclose(a, b, **kwargs) -> bool: \"\"\"polymorphic, parameterized isclose. 
>>> isclose(1.0, 1.0) True", "\"isclose\", \"IsClose\") version = _version.Version(\"1.1.0\") def isclose(a, b, **kwargs) -> bool: \"\"\"polymorphic, parameterized", "result = cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol", "if type_a != type_b and issubclass(type_b, type_a): x, y = b, a else:", "or a == b: result = True else: difference = abs(a - b)", "< b and not self(a, b) def less_than_or_close(self): \"\"\"less or close function.\"\"\" return", "s})() __all__ = (\"version\", \"isclose\", \"IsClose\") version = _version.Version(\"1.1.0\") def isclose(a, b, **kwargs)", "is b or a == b: result = True else: difference = abs(a", "lambda a, b: a < b or self(a, b) def much_greater_than(self): \"\"\"definitely greater", "\"\"\"polymorphic, parameterized isclose. >>> isclose(1.0, 1.0) True >>> isclose(0.0, 1.0) False >>> isclose(1.0j,", "= math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if", "= rel_tol is not None and difference <= rel_tol * max( abs(a), abs(b)", ") elif a is b or a == b: result = True else:", "type_a): x, y = b, a else: x, y = a, b result", "1.0j) True >>> isclose(-1.0j, 1.0j) False \"\"\" type_a = type(a) type_b = type(b)", "type_b and issubclass(type_b, type_a): x, y = b, a else: x, y =", "rel_tol * max( abs(a), abs(b) ) result = abs_result or rel_result except Exception:", "def kwargs(self): return self._kwargs def __call__(self, a, b) -> bool: \"\"\"Apply IsClose(). >>>", "\"\"\" return isclose(a, b, **self._kwargs) def close(self): \"\"\"close function. >>> myisclose = IsClose()", "elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex): result = cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol if", "self def notclose(self): \"\"\"not close function. 
>>> myisclose = IsClose() >>> callable(myisclose.notclose) True", "import version as _version if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except ImportError: _version =", "__init__(self, **kwargs) -> None: self._kwargs = kwargs @property def kwargs(self): return self._kwargs def", "b, a else: x, y = a, b result = NotImplemented try: result", "_version if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except ImportError: _version = type(\"_version\", (object,), {\"Version\":", "if abs_tol is None else float(abs_tol), ) elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex):", "x, y = a, b result = NotImplemented try: result = x.isclose(y, **kwargs)", "= 1e-9 isclose.default_abs_tol = 0.0 class IsClose: \"\"\"Allows pre-defined closeness on polymorphic isclose.\"\"\"", "_version.Version(\"1.1.0\") def isclose(a, b, **kwargs) -> bool: \"\"\"polymorphic, parameterized isclose. >>> isclose(1.0, 1.0)", "function.\"\"\" return lambda a, b: a > b or self(a, b) if __name__", ">>> isclose(-1.0j, 1.0j) False \"\"\" type_a = type(a) type_b = type(b) if type_a", "kwargs.get(\"abs_tol\", None) try: if isinstance(a, numbers.Real) and isinstance(b, numbers.Real): result = math.isclose( float(a),", "abs(a), abs(b) ) result = abs_result or rel_result except Exception: pass if result", "b) def less_than_or_close(self): \"\"\"less or close function.\"\"\" return lambda a, b: a <", "much_less_than(self): \"\"\"definitely less function.\"\"\" return lambda a, b: a < b and not", "abs_result = abs_tol is not None and difference <= abs_tol rel_result = rel_tol", "1.0j) False \"\"\" type_a = type(a) type_b = type(b) if type_a != type_b", "LOG = logging.getLogger(\"isclose\") try: import version as _version if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError", "**kwargs) except Exception: pass if result is NotImplemented: try: result = y.isclose(x, **kwargs)", "IsClose(). 
>>> myisclose = IsClose() >>> myisclose(1.0, 1.0) True \"\"\" return isclose(a, b,", "True \"\"\" return self def notclose(self): \"\"\"not close function. >>> myisclose = IsClose()", "closeness on polymorphic isclose.\"\"\" def __init__(self, **kwargs) -> None: self._kwargs = kwargs @property", "abs_tol is None else float(abs_tol), ) elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex): result", "myisclose = IsClose() >>> callable(myisclose.notclose) True \"\"\" return lambda a, b: not self(a,", "not self(a, b) def much_less_than(self): \"\"\"definitely less function.\"\"\" return lambda a, b: a", "myisclose(1.0, 1.0) True \"\"\" return isclose(a, b, **self._kwargs) def close(self): \"\"\"close function. >>>", ">>> myisclose = IsClose() >>> callable(myisclose.close) True \"\"\" return self def notclose(self): \"\"\"not", "myisclose = IsClose() >>> myisclose(1.0, 1.0) True \"\"\" return isclose(a, b, **self._kwargs) def", "a, b) -> bool: \"\"\"Apply IsClose(). >>> myisclose = IsClose() >>> myisclose(1.0, 1.0)", "try: result = x.isclose(y, **kwargs) except Exception: pass if result is NotImplemented: try:", "< b or self(a, b) def much_greater_than(self): \"\"\"definitely greater function.\"\"\" return lambda a,", "None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), ) elif isinstance(a,", "\"\"\"Extension to math.isclose and cmath.isclose.\"\"\" import cmath import logging import math import numbers", "and isinstance(b, numbers.Real): result = math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol if rel_tol is None", "abs_result or rel_result except Exception: pass if result is NotImplemented and not kwargs.get(\"return_NotImplemented\",", "b) def much_greater_than(self): \"\"\"definitely greater function.\"\"\" return lambda a, b: a > b", "or self(a, b) def much_greater_than(self): \"\"\"definitely greater function.\"\"\" return lambda a, b: a", "is NotImplemented: rel_tol = 
kwargs.get(\"rel_tol\", None) abs_tol = kwargs.get(\"abs_tol\", None) try: if isinstance(a,", "ImportError except ImportError: _version = type(\"_version\", (object,), {\"Version\": lambda self, s: s})() __all__", ">>> isclose(1.0, 1.0) True >>> isclose(0.0, 1.0) False >>> isclose(1.0j, 1.0j) True >>>", "max( abs(a), abs(b) ) result = abs_result or rel_result except Exception: pass if", "(object,), {\"Version\": lambda self, s: s})() __all__ = (\"version\", \"isclose\", \"IsClose\") version =", "difference <= abs_tol rel_result = rel_tol is not None and difference <= rel_tol", "> b and not self(a, b) def greater_than_or_close(self): \"\"\"greater or close function.\"\"\" return", "lambda a, b: a < b and not self(a, b) def less_than_or_close(self): \"\"\"less", "rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else", "def less_than_or_close(self): \"\"\"less or close function.\"\"\" return lambda a, b: a < b", "logging.getLogger(\"isclose\") try: import version as _version if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except ImportError:", "rel_result = rel_tol is not None and difference <= rel_tol * max( abs(a),", "logging import math import numbers LOG = logging.getLogger(\"isclose\") try: import version as _version", "float(abs_tol), ) elif a is b or a == b: result = True", "2021 <NAME> \"\"\"Extension to math.isclose and cmath.isclose.\"\"\" import cmath import logging import math", "a == b: result = True else: difference = abs(a - b) abs_result", "= kwargs.get(\"rel_tol\", None) abs_tol = kwargs.get(\"abs_tol\", None) try: if isinstance(a, numbers.Real) and isinstance(b,", "__call__(self, a, b) -> bool: \"\"\"Apply IsClose(). >>> myisclose = IsClose() >>> myisclose(1.0,", "def __call__(self, a, b) -> bool: \"\"\"Apply IsClose(). 
>>> myisclose = IsClose() >>>", "b) def greater_than_or_close(self): \"\"\"greater or close function.\"\"\" return lambda a, b: a >", "1.0) True >>> isclose(0.0, 1.0) False >>> isclose(1.0j, 1.0j) True >>> isclose(-1.0j, 1.0j)", "return lambda a, b: not self(a, b) def much_less_than(self): \"\"\"definitely less function.\"\"\" return", "not None and difference <= abs_tol rel_result = rel_tol is not None and", "b and not self(a, b) def less_than_or_close(self): \"\"\"less or close function.\"\"\" return lambda", "= logging.getLogger(\"isclose\") try: import version as _version if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except", "result = True else: difference = abs(a - b) abs_result = abs_tol is", "import math import numbers LOG = logging.getLogger(\"isclose\") try: import version as _version if", "b) def much_less_than(self): \"\"\"definitely less function.\"\"\" return lambda a, b: a < b", "Exception: pass if result is NotImplemented: rel_tol = kwargs.get(\"rel_tol\", None) abs_tol = kwargs.get(\"abs_tol\",", "if isinstance(a, numbers.Real) and isinstance(b, numbers.Real): result = math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol if", "pass if result is NotImplemented: rel_tol = kwargs.get(\"rel_tol\", None) abs_tol = kwargs.get(\"abs_tol\", None)", "y.isclose(x, **kwargs) except Exception: pass if result is NotImplemented: rel_tol = kwargs.get(\"rel_tol\", None)", "rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), )", "float(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None", "or rel_result except Exception: pass if result is NotImplemented and not kwargs.get(\"return_NotImplemented\", None):", "Exception: pass if result is NotImplemented and not kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot compare", "result is NotImplemented and not 
kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot compare {a!r} and {b!r}\")", "b: a < b and not self(a, b) def less_than_or_close(self): \"\"\"less or close", "{a!r} and {b!r}\") return result isclose.default_rel_tol = 1e-9 isclose.default_abs_tol = 0.0 class IsClose:", "close function.\"\"\" return lambda a, b: a > b or self(a, b) if", "\"\"\"Allows pre-defined closeness on polymorphic isclose.\"\"\" def __init__(self, **kwargs) -> None: self._kwargs =", "ImportError: _version = type(\"_version\", (object,), {\"Version\": lambda self, s: s})() __all__ = (\"version\",", "version as _version if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except ImportError: _version = type(\"_version\",", "abs_tol rel_result = rel_tol is not None and difference <= rel_tol * max(", "<= abs_tol rel_result = rel_tol is not None and difference <= rel_tol *", ") elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex): result = cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol", "isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex): result = cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol if rel_tol", ">>> myisclose = IsClose() >>> callable(myisclose.notclose) True \"\"\" return lambda a, b: not", "notclose(self): \"\"\"not close function. 
>>> myisclose = IsClose() >>> callable(myisclose.notclose) True \"\"\" return", "_version = type(\"_version\", (object,), {\"Version\": lambda self, s: s})() __all__ = (\"version\", \"isclose\",", "= IsClose() >>> myisclose(1.0, 1.0) True \"\"\" return isclose(a, b, **self._kwargs) def close(self):", "is not None and difference <= abs_tol rel_result = rel_tol is not None", "type(\"_version\", (object,), {\"Version\": lambda self, s: s})() __all__ = (\"version\", \"isclose\", \"IsClose\") version", "is not None and difference <= rel_tol * max( abs(a), abs(b) ) result", "type_a = type(a) type_b = type(b) if type_a != type_b and issubclass(type_b, type_a):", "= abs_result or rel_result except Exception: pass if result is NotImplemented and not", "rel_tol is not None and difference <= rel_tol * max( abs(a), abs(b) )", "True \"\"\" return isclose(a, b, **self._kwargs) def close(self): \"\"\"close function. >>> myisclose =", "rel_result except Exception: pass if result is NotImplemented and not kwargs.get(\"return_NotImplemented\", None): raise", "return lambda a, b: a < b or self(a, b) def much_greater_than(self): \"\"\"definitely", "float(abs_tol), ) elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex): result = cmath.isclose( complex(a), complex(b),", "raise ImportError except ImportError: _version = type(\"_version\", (object,), {\"Version\": lambda self, s: s})()", "is NotImplemented and not kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot compare {a!r} and {b!r}\") return", "return lambda a, b: a < b and not self(a, b) def less_than_or_close(self):", "try: if isinstance(a, numbers.Real) and isinstance(b, numbers.Real): result = math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol", "return self def notclose(self): \"\"\"not close function. 
>>> myisclose = IsClose() >>> callable(myisclose.notclose)", "except ImportError: _version = type(\"_version\", (object,), {\"Version\": lambda self, s: s})() __all__ =", "= kwargs @property def kwargs(self): return self._kwargs def __call__(self, a, b) -> bool:", "function.\"\"\" return lambda a, b: a < b or self(a, b) def much_greater_than(self):", "return isclose(a, b, **self._kwargs) def close(self): \"\"\"close function. >>> myisclose = IsClose() >>>", ">>> isclose(1.0j, 1.0j) True >>> isclose(-1.0j, 1.0j) False \"\"\" type_a = type(a) type_b", "abs_tol is None else float(abs_tol), ) elif a is b or a ==", "and not self(a, b) def greater_than_or_close(self): \"\"\"greater or close function.\"\"\" return lambda a,", "b) -> bool: \"\"\"Apply IsClose(). >>> myisclose = IsClose() >>> myisclose(1.0, 1.0) True", "return lambda a, b: a > b and not self(a, b) def greater_than_or_close(self):", "NotImplemented: rel_tol = kwargs.get(\"rel_tol\", None) abs_tol = kwargs.get(\"abs_tol\", None) try: if isinstance(a, numbers.Real)", "abs_tol = kwargs.get(\"abs_tol\", None) try: if isinstance(a, numbers.Real) and isinstance(b, numbers.Real): result =", "**kwargs) except Exception: pass if result is NotImplemented: rel_tol = kwargs.get(\"rel_tol\", None) abs_tol", "callable(myisclose.close) True \"\"\" return self def notclose(self): \"\"\"not close function. >>> myisclose =", "a, b: a > b and not self(a, b) def greater_than_or_close(self): \"\"\"greater or", "1.0) True \"\"\" return isclose(a, b, **self._kwargs) def close(self): \"\"\"close function. 
>>> myisclose", "= abs_tol is not None and difference <= abs_tol rel_result = rel_tol is", "is None else float(abs_tol), ) elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex): result =", "or close function.\"\"\" return lambda a, b: a < b or self(a, b)", "None and difference <= rel_tol * max( abs(a), abs(b) ) result = abs_result", "result = x.isclose(y, **kwargs) except Exception: pass if result is NotImplemented: try: result", "greater function.\"\"\" return lambda a, b: a > b and not self(a, b)", "= type(b) if type_a != type_b and issubclass(type_b, type_a): x, y = b,", "is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), ) elif", "if abs_tol is None else float(abs_tol), ) elif a is b or a", "difference = abs(a - b) abs_result = abs_tol is not None and difference", "except Exception: pass if result is NotImplemented and not kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot", "return self._kwargs def __call__(self, a, b) -> bool: \"\"\"Apply IsClose(). >>> myisclose =", "{b!r}\") return result isclose.default_rel_tol = 1e-9 isclose.default_abs_tol = 0.0 class IsClose: \"\"\"Allows pre-defined", "and {b!r}\") return result isclose.default_rel_tol = 1e-9 isclose.default_abs_tol = 0.0 class IsClose: \"\"\"Allows", "= type(\"_version\", (object,), {\"Version\": lambda self, s: s})() __all__ = (\"version\", \"isclose\", \"IsClose\")", "= cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if", "= x.isclose(y, **kwargs) except Exception: pass if result is NotImplemented: try: result =", "Exception: pass if result is NotImplemented: try: result = y.isclose(x, **kwargs) except Exception:", "\"\"\"close function. 
>>> myisclose = IsClose() >>> callable(myisclose.close) True \"\"\" return self def", "{\"Version\": lambda self, s: s})() __all__ = (\"version\", \"isclose\", \"IsClose\") version = _version.Version(\"1.1.0\")", ">>> myisclose(1.0, 1.0) True \"\"\" return isclose(a, b, **self._kwargs) def close(self): \"\"\"close function.", "class IsClose: \"\"\"Allows pre-defined closeness on polymorphic isclose.\"\"\" def __init__(self, **kwargs) -> None:", "abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), ) elif a is b or", "-> bool: \"\"\"polymorphic, parameterized isclose. >>> isclose(1.0, 1.0) True >>> isclose(0.0, 1.0) False", "isclose.default_rel_tol = 1e-9 isclose.default_abs_tol = 0.0 class IsClose: \"\"\"Allows pre-defined closeness on polymorphic", "IsClose: \"\"\"Allows pre-defined closeness on polymorphic isclose.\"\"\" def __init__(self, **kwargs) -> None: self._kwargs", "b) abs_result = abs_tol is not None and difference <= abs_tol rel_result =", "isclose(-1.0j, 1.0j) False \"\"\" type_a = type(a) type_b = type(b) if type_a !=", "pass if result is NotImplemented and not kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot compare {a!r}", "x, y = b, a else: x, y = a, b result =", "numbers.Real): result = math.isclose( float(a), float(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol),", "kwargs(self): return self._kwargs def __call__(self, a, b) -> bool: \"\"\"Apply IsClose(). >>> myisclose", "\"\"\"definitely greater function.\"\"\" return lambda a, b: a > b and not self(a,", "version = _version.Version(\"1.1.0\") def isclose(a, b, **kwargs) -> bool: \"\"\"polymorphic, parameterized isclose. >>>", "= 0.0 class IsClose: \"\"\"Allows pre-defined closeness on polymorphic isclose.\"\"\" def __init__(self, **kwargs)", "b: a > b or self(a, b) if __name__ == \"__main__\": import doctest", "**self._kwargs) def close(self): \"\"\"close function. 
>>> myisclose = IsClose() >>> callable(myisclose.close) True \"\"\"", "callable(myisclose.notclose) True \"\"\" return lambda a, b: not self(a, b) def much_less_than(self): \"\"\"definitely", "import logging import math import numbers LOG = logging.getLogger(\"isclose\") try: import version as", "isclose.default_abs_tol = 0.0 class IsClose: \"\"\"Allows pre-defined closeness on polymorphic isclose.\"\"\" def __init__(self,", "complex(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None", "else float(abs_tol), ) elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex): result = cmath.isclose( complex(a),", "numbers.Complex) and isinstance(b, numbers.Complex): result = cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol if rel_tol is", "float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), ) elif isinstance(a, numbers.Complex) and", "= type(a) type_b = type(b) if type_a != type_b and issubclass(type_b, type_a): x,", "None: self._kwargs = kwargs @property def kwargs(self): return self._kwargs def __call__(self, a, b)", "as _version if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except ImportError: _version = type(\"_version\", (object,),", "result = abs_result or rel_result except Exception: pass if result is NotImplemented and", "b, **kwargs) -> bool: \"\"\"polymorphic, parameterized isclose. 
>>> isclose(1.0, 1.0) True >>> isclose(0.0,", "numbers.Complex): result = cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol),", "not None and difference <= rel_tol * max( abs(a), abs(b) ) result =", "less_than_or_close(self): \"\"\"less or close function.\"\"\" return lambda a, b: a < b or", "y = b, a else: x, y = a, b result = NotImplemented", "kwargs.get(\"rel_tol\", None) abs_tol = kwargs.get(\"abs_tol\", None) try: if isinstance(a, numbers.Real) and isinstance(b, numbers.Real):", "\"\"\" return lambda a, b: not self(a, b) def much_less_than(self): \"\"\"definitely less function.\"\"\"", "def much_greater_than(self): \"\"\"definitely greater function.\"\"\" return lambda a, b: a > b and", "= IsClose() >>> callable(myisclose.notclose) True \"\"\" return lambda a, b: not self(a, b)", "a else: x, y = a, b result = NotImplemented try: result =", "else: x, y = a, b result = NotImplemented try: result = x.isclose(y,", "None) try: if isinstance(a, numbers.Real) and isinstance(b, numbers.Real): result = math.isclose( float(a), float(b),", "isclose(1.0, 1.0) True >>> isclose(0.0, 1.0) False >>> isclose(1.0j, 1.0j) True >>> isclose(-1.0j,", "raise TypeError(f\"cannot compare {a!r} and {b!r}\") return result isclose.default_rel_tol = 1e-9 isclose.default_abs_tol =", "= abs(a - b) abs_result = abs_tol is not None and difference <=", "self(a, b) def less_than_or_close(self): \"\"\"less or close function.\"\"\" return lambda a, b: a", "self, s: s})() __all__ = (\"version\", \"isclose\", \"IsClose\") version = _version.Version(\"1.1.0\") def isclose(a,", "isinstance(b, numbers.Complex): result = cmath.isclose( complex(a), complex(b), rel_tol=isclose.default_rel_tol if rel_tol is None else", "a, b: not self(a, b) def much_less_than(self): \"\"\"definitely less function.\"\"\" return lambda a,", "b: not self(a, b) def much_less_than(self): \"\"\"definitely less function.\"\"\" return lambda a, b:", "-> bool: \"\"\"Apply 
IsClose(). >>> myisclose = IsClose() >>> myisclose(1.0, 1.0) True \"\"\"", "def close(self): \"\"\"close function. >>> myisclose = IsClose() >>> callable(myisclose.close) True \"\"\" return", "a, b result = NotImplemented try: result = x.isclose(y, **kwargs) except Exception: pass", "= a, b result = NotImplemented try: result = x.isclose(y, **kwargs) except Exception:", "difference <= rel_tol * max( abs(a), abs(b) ) result = abs_result or rel_result", "<= rel_tol * max( abs(a), abs(b) ) result = abs_result or rel_result except", "IsClose() >>> callable(myisclose.notclose) True \"\"\" return lambda a, b: not self(a, b) def", "is None else float(abs_tol), ) elif a is b or a == b:", "* max( abs(a), abs(b) ) result = abs_result or rel_result except Exception: pass", "if not _version.version.is_backwards_compatible_with(\"1.0.0\"): raise ImportError except ImportError: _version = type(\"_version\", (object,), {\"Version\": lambda", "Copyright 2021 <NAME> \"\"\"Extension to math.isclose and cmath.isclose.\"\"\" import cmath import logging import", "None else float(abs_tol), ) elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex): result = cmath.isclose(", "else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is None else float(abs_tol), ) elif a is", "except Exception: pass if result is NotImplemented: try: result = y.isclose(x, **kwargs) except", "0.0 class IsClose: \"\"\"Allows pre-defined closeness on polymorphic isclose.\"\"\" def __init__(self, **kwargs) ->", "function.\"\"\" return lambda a, b: a < b and not self(a, b) def", "True \"\"\" return lambda a, b: not self(a, b) def much_less_than(self): \"\"\"definitely less", "import cmath import logging import math import numbers LOG = logging.getLogger(\"isclose\") try: import", "None else float(abs_tol), ) elif a is b or a == b: result", "isclose. 
>>> isclose(1.0, 1.0) True >>> isclose(0.0, 1.0) False >>> isclose(1.0j, 1.0j) True", "try: result = y.isclose(x, **kwargs) except Exception: pass if result is NotImplemented: rel_tol", "self._kwargs def __call__(self, a, b) -> bool: \"\"\"Apply IsClose(). >>> myisclose = IsClose()", "b and not self(a, b) def greater_than_or_close(self): \"\"\"greater or close function.\"\"\" return lambda", "**kwargs) -> None: self._kwargs = kwargs @property def kwargs(self): return self._kwargs def __call__(self,", "a > b and not self(a, b) def greater_than_or_close(self): \"\"\"greater or close function.\"\"\"", "NotImplemented: try: result = y.isclose(x, **kwargs) except Exception: pass if result is NotImplemented:", "on polymorphic isclose.\"\"\" def __init__(self, **kwargs) -> None: self._kwargs = kwargs @property def", "less function.\"\"\" return lambda a, b: a < b and not self(a, b)", "polymorphic isclose.\"\"\" def __init__(self, **kwargs) -> None: self._kwargs = kwargs @property def kwargs(self):", "<NAME> \"\"\"Extension to math.isclose and cmath.isclose.\"\"\" import cmath import logging import math import", "a, b: a < b and not self(a, b) def less_than_or_close(self): \"\"\"less or", "if result is NotImplemented and not kwargs.get(\"return_NotImplemented\", None): raise TypeError(f\"cannot compare {a!r} and", "a, b: a < b or self(a, b) def much_greater_than(self): \"\"\"definitely greater function.\"\"\"", "float(a), float(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is", "# Copyright 2021 <NAME> \"\"\"Extension to math.isclose and cmath.isclose.\"\"\" import cmath import logging", "\"\"\"less or close function.\"\"\" return lambda a, b: a < b or self(a,", "\"\"\"Apply IsClose(). 
>>> myisclose = IsClose() >>> myisclose(1.0, 1.0) True \"\"\" return isclose(a,", "= b, a else: x, y = a, b result = NotImplemented try:", "b or self(a, b) def much_greater_than(self): \"\"\"definitely greater function.\"\"\" return lambda a, b:", "True else: difference = abs(a - b) abs_result = abs_tol is not None", "function. >>> myisclose = IsClose() >>> callable(myisclose.notclose) True \"\"\" return lambda a, b:", "self(a, b) def greater_than_or_close(self): \"\"\"greater or close function.\"\"\" return lambda a, b: a", "None and difference <= abs_tol rel_result = rel_tol is not None and difference", ">>> myisclose = IsClose() >>> myisclose(1.0, 1.0) True \"\"\" return isclose(a, b, **self._kwargs)", "close(self): \"\"\"close function. >>> myisclose = IsClose() >>> callable(myisclose.close) True \"\"\" return self", "False >>> isclose(1.0j, 1.0j) True >>> isclose(-1.0j, 1.0j) False \"\"\" type_a = type(a)", "None): raise TypeError(f\"cannot compare {a!r} and {b!r}\") return result isclose.default_rel_tol = 1e-9 isclose.default_abs_tol", "\"\"\"not close function. >>> myisclose = IsClose() >>> callable(myisclose.notclose) True \"\"\" return lambda", "complex(a), complex(b), rel_tol=isclose.default_rel_tol if rel_tol is None else float(rel_tol), abs_tol=isclose.default_abs_tol if abs_tol is", "elif a is b or a == b: result = True else: difference", "parameterized isclose. 
>>> isclose(1.0, 1.0) True >>> isclose(0.0, 1.0) False >>> isclose(1.0j, 1.0j)", "b result = NotImplemented try: result = x.isclose(y, **kwargs) except Exception: pass if", "s: s})() __all__ = (\"version\", \"isclose\", \"IsClose\") version = _version.Version(\"1.1.0\") def isclose(a, b,", "result isclose.default_rel_tol = 1e-9 isclose.default_abs_tol = 0.0 class IsClose: \"\"\"Allows pre-defined closeness on", "None) abs_tol = kwargs.get(\"abs_tol\", None) try: if isinstance(a, numbers.Real) and isinstance(b, numbers.Real): result", "@property def kwargs(self): return self._kwargs def __call__(self, a, b) -> bool: \"\"\"Apply IsClose().", "\"IsClose\") version = _version.Version(\"1.1.0\") def isclose(a, b, **kwargs) -> bool: \"\"\"polymorphic, parameterized isclose.", "__all__ = (\"version\", \"isclose\", \"IsClose\") version = _version.Version(\"1.1.0\") def isclose(a, b, **kwargs) ->", "NotImplemented try: result = x.isclose(y, **kwargs) except Exception: pass if result is NotImplemented:", "if result is NotImplemented: try: result = y.isclose(x, **kwargs) except Exception: pass if", ">>> callable(myisclose.notclose) True \"\"\" return lambda a, b: not self(a, b) def much_less_than(self):", "result = NotImplemented try: result = x.isclose(y, **kwargs) except Exception: pass if result", "= NotImplemented try: result = x.isclose(y, **kwargs) except Exception: pass if result is", "else float(abs_tol), ) elif a is b or a == b: result =", "type_b = type(b) if type_a != type_b and issubclass(type_b, type_a): x, y =" ]
[ "2 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 2 |", "required_operation_two(self): pass def hook1(self): pass def hook2(self): pass class ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('|", "executando a operação 1.') @staticmethod def operation_two(): print_abstract('| Classe abstrata | Estou executando", "required_operation_two(self): print_concrete('| Classe concreta 1 | Operação requerida 2 implementada.') class ConcreteClass2(AbstractClass): def", "requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 1 | Operação requerida 2", "1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 2 | Operação requerida 2 implementada.')", "abstrata | Estou executando a operação 1.') @staticmethod def operation_two(): print_abstract('| Classe abstrata", "Operação requerida 2 implementada.') class ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 2 |", "@abstractmethod def required_operation_two(self): pass def hook1(self): pass def hook2(self): pass class ConcreteClass1(AbstractClass): def", "required_operation_two(self): print_concrete('| Classe concreta 2 | Operação requerida 2 implementada.') def hook1(self): print_concrete('|", "operation_two(): print_abstract('| Classe abstrata | Estou executando a operação 2.') @staticmethod def operation_three():", "def operation_three(): print_abstract('| Classe abstrata | Estou executando a operação 3.') @abstractmethod def", "self.required_operation_two() self.operation_three() self.hook2() @staticmethod def operation_one(): print_abstract('| Classe abstrata | Estou executando a", "2.') @staticmethod def operation_three(): print_abstract('| Classe abstrata | Estou executando a operação 3.')", "2 | Hook 1 implementado.') def run(concrete_class): # Deve receber uma subclasse de", "self.operation_one() 
self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two() self.operation_three() self.hook2() @staticmethod def operation_one(): print_abstract('| Classe abstrata", "pass def hook2(self): pass class ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 1 |", "executando a operação 3.') @abstractmethod def required_operation_one(self): pass @abstractmethod def required_operation_two(self): pass def", "def hook1(self): pass def hook2(self): pass class ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta", "| Operação requerida 2 implementada.') class ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 2", "Classe concreta 2 | Operação requerida 2 implementada.') def hook1(self): print_concrete('| Classe concreta", "print_abstract('| Classe abstrata | Estou executando a operação 2.') @staticmethod def operation_three(): print_abstract('|", "abstrata | Estou executando a operação 2.') @staticmethod def operation_three(): print_abstract('| Classe abstrata", "Classe abstrata | Estou executando a operação 1.') @staticmethod def operation_two(): print_abstract('| Classe", "Estou executando a operação 3.') @abstractmethod def required_operation_one(self): pass @abstractmethod def required_operation_two(self): pass", "implementado.') def run(concrete_class): # Deve receber uma subclasse de AbstractClass! 
concrete_class.template_method() if __name__", "concreta 1 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 1", "required_operation_one(self): pass @abstractmethod def required_operation_two(self): pass def hook1(self): pass def hook2(self): pass class", "def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def template_method(self): self.operation_one() self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two() self.operation_three()", "def required_operation_two(self): pass def hook1(self): pass def hook2(self): pass class ConcreteClass1(AbstractClass): def required_operation_one(self):", "abc import ABC, abstractmethod def print_abstract(string): print(f'\\033[31m{string}\\033[0;0m') def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def", "2 | Operação requerida 2 implementada.') def hook1(self): print_concrete('| Classe concreta 2 |", "self.operation_two() self.hook1() self.required_operation_two() self.operation_three() self.hook2() @staticmethod def operation_one(): print_abstract('| Classe abstrata | Estou", "concreta 2 | Operação requerida 2 implementada.') def hook1(self): print_concrete('| Classe concreta 2", "@abstractmethod def required_operation_one(self): pass @abstractmethod def required_operation_two(self): pass def hook1(self): pass def hook2(self):", "run(concrete_class): # Deve receber uma subclasse de AbstractClass! 
concrete_class.template_method() if __name__ == '__main__':", "concreta 2 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 2", "self.operation_three() self.hook2() @staticmethod def operation_one(): print_abstract('| Classe abstrata | Estou executando a operação", "class AbstractClass(ABC): def template_method(self): self.operation_one() self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two() self.operation_three() self.hook2() @staticmethod def", "receber uma subclasse de AbstractClass! concrete_class.template_method() if __name__ == '__main__': run(ConcreteClass1()) print('') run(ConcreteClass2())", "pass @abstractmethod def required_operation_two(self): pass def hook1(self): pass def hook2(self): pass class ConcreteClass1(AbstractClass):", "operação 1.') @staticmethod def operation_two(): print_abstract('| Classe abstrata | Estou executando a operação", "1 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 1 |", "def required_operation_two(self): print_concrete('| Classe concreta 2 | Operação requerida 2 implementada.') def hook1(self):", "| Estou executando a operação 3.') @abstractmethod def required_operation_one(self): pass @abstractmethod def required_operation_two(self):", "ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 2 | Operação requerida 1 implementada.') def", "self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two() self.operation_three() self.hook2() @staticmethod def operation_one(): print_abstract('| Classe abstrata |", "print_concrete('| Classe concreta 1 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe", "print_concrete('| Classe concreta 2 | Operação requerida 2 implementada.') def hook1(self): print_concrete('| Classe", "Classe abstrata | Estou executando a 
operação 2.') @staticmethod def operation_three(): print_abstract('| Classe", "def required_operation_one(self): print_concrete('| Classe concreta 1 | Operação requerida 1 implementada.') def required_operation_two(self):", "print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def template_method(self): self.operation_one() self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two() self.operation_three() self.hook2() @staticmethod", "operação 2.') @staticmethod def operation_three(): print_abstract('| Classe abstrata | Estou executando a operação", "def required_operation_one(self): pass @abstractmethod def required_operation_two(self): pass def hook1(self): pass def hook2(self): pass", "# Deve receber uma subclasse de AbstractClass! concrete_class.template_method() if __name__ == '__main__': run(ConcreteClass1())", "2 implementada.') class ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 2 | Operação requerida", "executando a operação 2.') @staticmethod def operation_three(): print_abstract('| Classe abstrata | Estou executando", "operation_one(): print_abstract('| Classe abstrata | Estou executando a operação 1.') @staticmethod def operation_two():", "2 implementada.') def hook1(self): print_concrete('| Classe concreta 2 | Hook 1 implementado.') def", "<gh_stars>0 from abc import ABC, abstractmethod def print_abstract(string): print(f'\\033[31m{string}\\033[0;0m') def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class", "implementada.') class ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 2 | Operação requerida 1", "def print_abstract(string): print(f'\\033[31m{string}\\033[0;0m') def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def template_method(self): self.operation_one() self.required_operation_one() self.operation_two()", "Operação requerida 2 
implementada.') def hook1(self): print_concrete('| Classe concreta 2 | Hook 1", "@staticmethod def operation_one(): print_abstract('| Classe abstrata | Estou executando a operação 1.') @staticmethod", "1.') @staticmethod def operation_two(): print_abstract('| Classe abstrata | Estou executando a operação 2.')", "from abc import ABC, abstractmethod def print_abstract(string): print(f'\\033[31m{string}\\033[0;0m') def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC):", "required_operation_one(self): print_concrete('| Classe concreta 1 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('|", "class ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 1 | Operação requerida 1 implementada.')", "print_concrete('| Classe concreta 2 | Hook 1 implementado.') def run(concrete_class): # Deve receber", "def run(concrete_class): # Deve receber uma subclasse de AbstractClass! concrete_class.template_method() if __name__ ==", "class ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 2 | Operação requerida 1 implementada.')", "a operação 1.') @staticmethod def operation_two(): print_abstract('| Classe abstrata | Estou executando a", "hook1(self): pass def hook2(self): pass class ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 1", "Estou executando a operação 1.') @staticmethod def operation_two(): print_abstract('| Classe abstrata | Estou", "hook1(self): print_concrete('| Classe concreta 2 | Hook 1 implementado.') def run(concrete_class): # Deve", "1 implementado.') def run(concrete_class): # Deve receber uma subclasse de AbstractClass! 
concrete_class.template_method() if", "1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 1 | Operação requerida 2 implementada.')", "def operation_one(): print_abstract('| Classe abstrata | Estou executando a operação 1.') @staticmethod def", "pass class ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 1 | Operação requerida 1", "Hook 1 implementado.') def run(concrete_class): # Deve receber uma subclasse de AbstractClass! concrete_class.template_method()", "print(f'\\033[31m{string}\\033[0;0m') def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def template_method(self): self.operation_one() self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two()", "a operação 3.') @abstractmethod def required_operation_one(self): pass @abstractmethod def required_operation_two(self): pass def hook1(self):", "implementada.') def required_operation_two(self): print_concrete('| Classe concreta 1 | Operação requerida 2 implementada.') class", "Classe concreta 1 | Operação requerida 2 implementada.') class ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('|", "implementada.') def hook1(self): print_concrete('| Classe concreta 2 | Hook 1 implementado.') def run(concrete_class):", "required_operation_one(self): print_concrete('| Classe concreta 2 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('|", "| Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 1 | Operação", "Classe concreta 2 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta", "print_abstract(string): print(f'\\033[31m{string}\\033[0;0m') def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def template_method(self): self.operation_one() 
self.required_operation_one() self.operation_two() self.hook1()", "operação 3.') @abstractmethod def required_operation_one(self): pass @abstractmethod def required_operation_two(self): pass def hook1(self): pass", "| Hook 1 implementado.') def run(concrete_class): # Deve receber uma subclasse de AbstractClass!", "AbstractClass(ABC): def template_method(self): self.operation_one() self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two() self.operation_three() self.hook2() @staticmethod def operation_one():", "a operação 2.') @staticmethod def operation_three(): print_abstract('| Classe abstrata | Estou executando a", "def operation_two(): print_abstract('| Classe abstrata | Estou executando a operação 2.') @staticmethod def", "def hook2(self): pass class ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 1 | Operação", "Estou executando a operação 2.') @staticmethod def operation_three(): print_abstract('| Classe abstrata | Estou", "requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 2 | Operação requerida 2", "| Estou executando a operação 1.') @staticmethod def operation_two(): print_abstract('| Classe abstrata |", "Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 1 | Operação requerida", "concreta 1 | Operação requerida 2 implementada.') class ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('| Classe", "print_abstract('| Classe abstrata | Estou executando a operação 1.') @staticmethod def operation_two(): print_abstract('|", "requerida 2 implementada.') def hook1(self): print_concrete('| Classe concreta 2 | Hook 1 implementado.')", "ABC, abstractmethod def print_abstract(string): print(f'\\033[31m{string}\\033[0;0m') def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def template_method(self): 
self.operation_one()", "ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 1 | Operação requerida 1 implementada.') def", "print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def template_method(self): self.operation_one() self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two() self.operation_three() self.hook2()", "Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 2 | Operação requerida", "pass def hook1(self): pass def hook2(self): pass class ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('| Classe", "implementada.') def required_operation_two(self): print_concrete('| Classe concreta 2 | Operação requerida 2 implementada.') def", "print_abstract('| Classe abstrata | Estou executando a operação 3.') @abstractmethod def required_operation_one(self): pass", "1 | Operação requerida 2 implementada.') class ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta", "print_concrete('| Classe concreta 1 | Operação requerida 2 implementada.') class ConcreteClass2(AbstractClass): def required_operation_one(self):", "requerida 2 implementada.') class ConcreteClass2(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 2 | Operação", "operation_three(): print_abstract('| Classe abstrata | Estou executando a operação 3.') @abstractmethod def required_operation_one(self):", "abstractmethod def print_abstract(string): print(f'\\033[31m{string}\\033[0;0m') def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def template_method(self): self.operation_one() self.required_operation_one()", "template_method(self): self.operation_one() self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two() self.operation_three() self.hook2() 
@staticmethod def operation_one(): print_abstract('| Classe", "@staticmethod def operation_two(): print_abstract('| Classe abstrata | Estou executando a operação 2.') @staticmethod", "def required_operation_one(self): print_concrete('| Classe concreta 2 | Operação requerida 1 implementada.') def required_operation_two(self):", "self.hook1() self.required_operation_two() self.operation_three() self.hook2() @staticmethod def operation_one(): print_abstract('| Classe abstrata | Estou executando", "Deve receber uma subclasse de AbstractClass! concrete_class.template_method() if __name__ == '__main__': run(ConcreteClass1()) print('')", "def template_method(self): self.operation_one() self.required_operation_one() self.operation_two() self.hook1() self.required_operation_two() self.operation_three() self.hook2() @staticmethod def operation_one(): print_abstract('|", "def required_operation_two(self): print_concrete('| Classe concreta 1 | Operação requerida 2 implementada.') class ConcreteClass2(AbstractClass):", "| Operação requerida 2 implementada.') def hook1(self): print_concrete('| Classe concreta 2 | Hook", "3.') @abstractmethod def required_operation_one(self): pass @abstractmethod def required_operation_two(self): pass def hook1(self): pass def", "concreta 2 | Hook 1 implementado.') def run(concrete_class): # Deve receber uma subclasse", "hook2(self): pass class ConcreteClass1(AbstractClass): def required_operation_one(self): print_concrete('| Classe concreta 1 | Operação requerida", "| Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta 2 | Operação", "def hook1(self): print_concrete('| Classe concreta 2 | Hook 1 implementado.') def run(concrete_class): #", "self.hook2() @staticmethod def operation_one(): print_abstract('| Classe abstrata | Estou executando a operação 1.')", "Classe concreta 2 | Hook 1 implementado.') def run(concrete_class): # Deve receber uma", "@staticmethod def operation_three(): 
print_abstract('| Classe abstrata | Estou executando a operação 3.') @abstractmethod", "print_concrete('| Classe concreta 2 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe", "| Estou executando a operação 2.') @staticmethod def operation_three(): print_abstract('| Classe abstrata |", "import ABC, abstractmethod def print_abstract(string): print(f'\\033[31m{string}\\033[0;0m') def print_concrete(string): print(f'\\033[32m{string}\\033[0;0m') class AbstractClass(ABC): def template_method(self):", "Classe abstrata | Estou executando a operação 3.') @abstractmethod def required_operation_one(self): pass @abstractmethod", "Classe concreta 1 | Operação requerida 1 implementada.') def required_operation_two(self): print_concrete('| Classe concreta", "abstrata | Estou executando a operação 3.') @abstractmethod def required_operation_one(self): pass @abstractmethod def" ]
[ "files aoi_section = ( \"[aoi]\\n\" + f\"name = {baseWorkingSet['name'].replace(' ', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\")", "{models_folder}/Columbia Geon Segmentation Model\\n\" + \"model_prefix = dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") # Ensure folder", "only be one point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume =", "output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs = [ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks = [ #", "pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path) # Create output dir", "def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo, outputFolder): gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet:", "\"[paths]\\n\" + f\"p3d_fpath = {point_cloud_path}\\n\" + f\"work_dir = {outputDir}\\n\" # Supply empty dir", "= tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path) # Create", "no errors are generated + f\"rpc_dir = {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") # Set name", "Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True, ) modelsFolder = Folder().findOne( { \"parentId\": core3dCollection[\"_id\"],", "dir so no errors are generated + f\"rpc_dir = {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") #", "getWorkingSet( 
DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection = Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True, )", "BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get single file, there will only be one", "# Set name prefix for output files aoi_section = ( \"[aoi]\\n\" + f\"name", "from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from danesfield_server.workflow import DanesfieldWorkflowException from docker.types import DeviceRequest from", "config_file_path) with open(config_file, \"w\") as in_config_file: # Configure paths paths_section = ( \"[paths]\\n\"", "will only be one point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume", "output files aoi_section = ( \"[aoi]\\n\" + f\"name = {baseWorkingSet['name'].replace(' ', '_')}\" )", "Model\\n\" + \"model_prefix = dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") # Ensure folder exists existing_folder_id =", "# Ensure folder exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is None: output_folder =", "asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments(", "# Create output dir outputDir = tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir) # Create", "results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id, ), ] asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\",", "Step that generates a point cloud. 
Supports the following options: - aoiBBox (required)", "{outputDir}\\n\" # Supply empty dir so no errors are generated + f\"rpc_dir =", "options: - aoiBBox (required) \"\"\" def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo,", "', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") # Ground sample distancy of output imagery in meters", "outputFolder): gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get point cloud", "config_file_path = tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path, config_file_path) with open(config_file, \"w\") as in_config_file: #", "f\"name = {baseWorkingSet['name'].replace(' ', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") # Ground sample distancy of output", "import tempfile from typing import Dict from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from danesfield_server.workflow import", "self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo, outputFolder): gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT,", "and Contributors # Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0) # See accompanying", "# Create config file config_file, config_file_path = tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path, config_file_path) with", "from typing import Dict from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from danesfield_server.workflow import DanesfieldWorkflowException from", "in_config_file.write(f\"{aoi_section}\\n\") # Ground sample distancy of output imagery in meters per pixel #", "Collection from girder.models.folder import Folder from girder.models.user import User from 
girder_worker.docker.tasks import docker_run", "# Get point cloud working set pointCloudWorkingSet: Dict = getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo )", "aoiBBox (required) \"\"\" def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo, outputFolder): gc", "output folder permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ), # Upload", "jobInfo, outputFolder): gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get point", "there will only be one point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path)", ") existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs = [ \"python\", \"/danesfield/tools/run_danesfield.py\",", "RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that generates a point cloud. 
Supports the following options: -", "outputDir = tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir) # Create config file config_file, config_file_path", "files for details ############################################################################### import os import tempfile from typing import Dict from", "import DanesfieldWorkflowException from docker.types import DeviceRequest from girder.models.collection import Collection from girder.models.folder import", "Geon Segmentation Model\\n\" + \"model_prefix = dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") # Ensure folder exists", "# Get single file, there will only be one point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile", "{tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") # Set name prefix for output files aoi_section = (", "errors are generated + f\"rpc_dir = {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") # Set name prefix", "Add info for job event listeners job = asyncResult.job job = addJobInfo( job,", "= output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs = [ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ]", "WorkingSet().save(baseWorkingSet) containerArgs = [ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks = [ # -", "description=\"\", public=True, reuseExisting=True, ) modelsFolder = Folder().findOne( { \"parentId\": core3dCollection[\"_id\"], \"name\": \"models\", }", "danesfield_server.workflow import DanesfieldWorkflowException from docker.types import DeviceRequest from girder.models.collection import Collection from girder.models.folder", "pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run 
imageless workflow on [{baseWorkingSet['name']}]\",", "Create output dir outputDir = tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir) # Create config", "cloud working set pointCloudWorkingSet: Dict = getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection = Collection().createCollection(", "containerArgs=containerArgs, jobTitle=f\"Run imageless workflow on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), ) # Add", "paths_section = ( \"[paths]\\n\" + f\"p3d_fpath = {point_cloud_path}\\n\" + f\"work_dir = {outputDir}\\n\" #", "class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that generates a point cloud. Supports the following options:", "BindMountVolume, VolumePath from danesfield_server.algorithms.common import ( addJobInfo, createDockerRunArguments, createGirderClient, ) from ..constants import", "import DeviceRequest from girder.models.collection import Collection from girder.models.folder import Folder from girder.models.user import", "be one point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path,", "None: raise DanesfieldWorkflowException( \"Models folder has not been created and populated\" ) #", "), ) # Add info for job event listeners job = asyncResult.job job", "name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True, ) modelsFolder = Folder().findOne( { \"parentId\": core3dCollection[\"_id\"], \"name\":", "import docker_run from girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms import BindMountVolume, VolumePath", ") in_config_file.write(f\"{paths_section}\\n\") # Set name prefix for output files aoi_section = ( \"[aoi]\\n\"", 
"existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is None: output_folder = Folder().createFolder( parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless)", "= baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is None: output_folder = Folder().createFolder( parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\",", "tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get single file, there will", "ResultRunDockerCommand from danesfield_server.workflow import DanesfieldWorkflowException from docker.types import DeviceRequest from girder.models.collection import Collection", "sample distancy of output imagery in meters per pixel # Default is 0.25", "GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id, ), ] asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[", "working set pointCloudWorkingSet: Dict = getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection = Collection().createCollection( name=\"core3d\",", "file, there will only be one point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]),", "+ f\"rpc_dir = {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") # Set name prefix for output files", "Configure paths paths_section = ( \"[paths]\\n\" + f\"p3d_fpath = {point_cloud_path}\\n\" + f\"work_dir =", "Contributors # Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0) # See accompanying Copyright.txt", "event listeners job = asyncResult.job job = addJobInfo( job, jobId=jobInfo.jobId, stepName=self.name, workingSetId=baseWorkingSet[\"_id\"], )", "file config_file, config_file_path = 
tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path, config_file_path) with open(config_file, \"w\") as", "getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get point cloud working set pointCloudWorkingSet: Dict = getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD,", "config file config_file, config_file_path = tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path, config_file_path) with open(config_file, \"w\")", "Dict from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from danesfield_server.workflow import DanesfieldWorkflowException from docker.types import DeviceRequest", "..models.workingSet import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that generates a point cloud. Supports", "import User from girder_worker.docker.tasks import docker_run from girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder, ) from", "= {models_folder}/Columbia Geon Segmentation Model\\n\" + \"model_prefix = dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") # Ensure", "= BindMountVolume(config_file_path, config_file_path) with open(config_file, \"w\") as in_config_file: # Configure paths paths_section =", "created and populated\" ) # Download models folder models_folder = tempfile.mkdtemp() modelsFolderVolume =", "Supply empty dir so no errors are generated + f\"rpc_dir = {tempfile.mkdtemp()}\\n\" )", "the roof geon extraction step roof_section = ( \"[roof]\\n\" + f\"model_dir = {models_folder}/Columbia", "), ] asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume,", "DanesfieldWorkflowException( \"Models folder has not been created and populated\" ) # Download models", "+ f\"p3d_fpath = {point_cloud_path}\\n\" + f\"work_dir = {outputDir}\\n\" # Supply empty dir so", "name 
prefix for output files aoi_section = ( \"[aoi]\\n\" + f\"name = {baseWorkingSet['name'].replace('", "DockerImage from ..workflow_step import DanesfieldWorkflowStep from ..workflow_utilities import getWorkingSet from ..models.workingSet import WorkingSet", "output_folder = Folder().createFolder( parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"]", "\"name\": \"models\", } ) if modelsFolder is None: raise DanesfieldWorkflowException( \"Models folder has", "jobTitle=f\"Run imageless workflow on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), ) # Add info", "extraction step roof_section = ( \"[roof]\\n\" + f\"model_dir = {models_folder}/Columbia Geon Segmentation Model\\n\"", "generates a point cloud. Supports the following options: - aoiBBox (required) \"\"\" def", "Supports the following options: - aoiBBox (required) \"\"\" def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD)", "\"\"\" Step that generates a point cloud. 
Supports the following options: - aoiBBox", "(required) \"\"\" def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo, outputFolder): gc =", "= {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") # Set name prefix for output files aoi_section =", "is None: raise DanesfieldWorkflowException( \"Models folder has not been created and populated\" )", "container_path=outputDir) # Create config file config_file, config_file_path = tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path, config_file_path)", "girder.models.folder import Folder from girder.models.user import User from girder_worker.docker.tasks import docker_run from girder_worker.docker.transforms.girder", "volume=outputDirVolume), existing_folder_id, ), ] asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume,", "############################################################################### import os import tempfile from typing import Dict from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand", "Copyright Kitware Inc. and Contributors # Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)", "Kitware Inc. 
and Contributors # Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0) #", "DanesfieldWorkflowStep from ..workflow_utilities import getWorkingSet from ..models.workingSet import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step", "point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path) # Create output dir outputDir = tempfile.mkdtemp() outputDirVolume", "user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), ) # Add info for job event listeners job =", "has not been created and populated\" ) # Download models folder models_folder =", "populated\" ) # Download models folder models_folder = tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder, models_folder)", "\"[params]\\n\" + \"gsd = 0.25\\n\" in_config_file.write(f\"{params_section}\\n\") # Parameters for the roof geon extraction", "VolumePath(\".\", volume=outputDirVolume), existing_folder_id, ), ] asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[ pointCloudFileVolume,", "existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs = [ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path,", "Folder().createFolder( parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"]", "are generated + f\"rpc_dir = {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") # Set name prefix for", "parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs", 
"jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), ) # Add info for job event listeners job", "[{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), ) # Add info for job event listeners", "= ( \"[paths]\\n\" + f\"p3d_fpath = {point_cloud_path}\\n\" + f\"work_dir = {outputDir}\\n\" # Supply", "= ( \"[aoi]\\n\" + f\"name = {baseWorkingSet['name'].replace(' ', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") # Ground", "# - Fix output folder permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(),", ") # Add info for job event listeners job = asyncResult.job job =", "config_file, config_file_path = tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path, config_file_path) with open(config_file, \"w\") as in_config_file:", "- aoiBBox (required) \"\"\" def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo, outputFolder):", "{baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs = [", "in_config_file: # Configure paths paths_section = ( \"[paths]\\n\" + f\"p3d_fpath = {point_cloud_path}\\n\" +", "for the roof geon extraction step roof_section = ( \"[roof]\\n\" + f\"model_dir =", "so no errors are generated + f\"rpc_dir = {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") # Set", "import ( addJobInfo, createDockerRunArguments, createGirderClient, ) from ..constants import DanesfieldStep, DockerImage from ..workflow_step", "point cloud. 
Supports the following options: - aoiBBox (required) \"\"\" def __init__(self): super(RunDanesfieldImageless,", "..constants import DanesfieldStep, DockerImage from ..workflow_step import DanesfieldWorkflowStep from ..workflow_utilities import getWorkingSet from", "= output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs = [ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks = [", "0.25 params_section = \"[params]\\n\" + \"gsd = 0.25\\n\" in_config_file.write(f\"{params_section}\\n\") # Parameters for the", "device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run", "[ # - Fix output folder permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir],", "and LICENSE files for details ############################################################################### import os import tempfile from typing import", "on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), ) # Add info for job event", "DanesfieldStep, DockerImage from ..workflow_step import DanesfieldWorkflowStep from ..workflow_utilities import getWorkingSet from ..models.workingSet import", "roof_section = ( \"[roof]\\n\" + f\"model_dir = {models_folder}/Columbia Geon Segmentation Model\\n\" + \"model_prefix", "= ( \"[roof]\\n\" + f\"model_dir = {models_folder}/Columbia Geon Segmentation Model\\n\" + \"model_prefix =", "def run(self, jobInfo, outputFolder): gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo) #", "baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is None: output_folder = Folder().createFolder( 
parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True,", "parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet)", "self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo, outputFolder): gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo)", "= tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get single file, there", "outputDir], volumes=outputDirVolume._repr_json_(), ), # Upload results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id, ), ] asyncResult", "Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0) # See accompanying Copyright.txt and LICENSE", "accompanying Copyright.txt and LICENSE files for details ############################################################################### import os import tempfile from", "BindMountVolume(host_path=outputDir, container_path=outputDir) # Create config file config_file, config_file_path = tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path,", "of output imagery in meters per pixel # Default is 0.25 params_section =", "in_config_file.write(f\"{roof_section}\\n\") # Ensure folder exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is None: output_folder", "**createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run imageless workflow on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), )", "#!/usr/bin/env python # -*- 
coding: utf-8 -*- ############################################################################### # Copyright Kitware Inc. and", "point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path) #", "pointCloudWorkingSet: Dict = getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection = Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\",", "output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs = [ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks", "containerArgs = [ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks = [ # - Fix", "Ensure folder exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is None: output_folder = Folder().createFolder(", "imageless workflow on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), ) # Add info for", "outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run imageless workflow on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user,", "girder_worker.docker.tasks import docker_run from girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms import BindMountVolume,", "import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that generates a point cloud. 
Supports the", "import Folder from girder.models.user import User from girder_worker.docker.tasks import docker_run from girder_worker.docker.transforms.girder import", "+ f\"work_dir = {outputDir}\\n\" # Supply empty dir so no errors are generated", "from ..workflow_step import DanesfieldWorkflowStep from ..workflow_utilities import getWorkingSet from ..models.workingSet import WorkingSet class", "= {outputDir}\\n\" # Supply empty dir so no errors are generated + f\"rpc_dir", ") core3dCollection = Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True, ) modelsFolder = Folder().findOne(", "existing_folder_id is None: output_folder = Folder().createFolder( parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id", "\"model_prefix = dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") # Ensure folder exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if", "configFileVolume, outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run imageless workflow on [{baseWorkingSet['name']}]\", jobType=self.name,", "folder exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is None: output_folder = Folder().createFolder( parent=core3dCollection,", "Folder from girder.models.user import User from girder_worker.docker.tasks import docker_run from girder_worker.docker.transforms.girder import (", "in_config_file.write(f\"{params_section}\\n\") # Parameters for the roof geon extraction step roof_section = ( \"[roof]\\n\"", "# Default is 0.25 params_section = \"[params]\\n\" + \"gsd = 0.25\\n\" in_config_file.write(f\"{params_section}\\n\") #", "paths paths_section = ( \"[paths]\\n\" + f\"p3d_fpath = {point_cloud_path}\\n\" + f\"work_dir = {outputDir}\\n\"", "Dict 
= getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection = Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True,", "+ \"gsd = 0.25\\n\" in_config_file.write(f\"{params_section}\\n\") # Parameters for the roof geon extraction step", "( \"[aoi]\\n\" + f\"name = {baseWorkingSet['name'].replace(' ', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") # Ground sample", "under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0) # See accompanying Copyright.txt and LICENSE files", "Apache License, 2.0 (apache.org/licenses/LICENSE-2.0) # See accompanying Copyright.txt and LICENSE files for details", "the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0) # See accompanying Copyright.txt and LICENSE files for", "DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ), # Upload results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume),", "meters per pixel # Default is 0.25 params_section = \"[params]\\n\" + \"gsd =", "addJobInfo, createDockerRunArguments, createGirderClient, ) from ..constants import DanesfieldStep, DockerImage from ..workflow_step import DanesfieldWorkflowStep", "f\"work_dir = {outputDir}\\n\" # Supply empty dir so no errors are generated +", "Download models folder models_folder = tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) #", "Create config file config_file, config_file_path = tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path, config_file_path) with open(config_file,", ") in_config_file.write(f\"{roof_section}\\n\") # Ensure folder exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is None:", "Copyright.txt and LICENSE files for details 
############################################################################### import os import tempfile from typing", "= BindMountVolume(host_path=outputDir, container_path=outputDir) # Create config file config_file, config_file_path = tempfile.mkstemp(suffix=\".ini\") configFileVolume =", "name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs =", "output imagery in meters per pixel # Default is 0.25 params_section = \"[params]\\n\"", "= [ # - Fix output folder permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\",", "import os import tempfile from typing import Dict from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from", "typing import Dict from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from danesfield_server.workflow import DanesfieldWorkflowException from docker.types", "jobInfo) # Get point cloud working set pointCloudWorkingSet: Dict = getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo", "import ( GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms import BindMountVolume, VolumePath from danesfield_server.algorithms.common import (", "docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs,", "from girder.models.collection import Collection from girder.models.folder import Folder from girder.models.user import User from", "and populated\" ) # Download models folder models_folder = tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder,", "# Ground sample distancy of output 
imagery in meters per pixel # Default", "( \"[paths]\\n\" + f\"p3d_fpath = {point_cloud_path}\\n\" + f\"work_dir = {outputDir}\\n\" # Supply empty", "image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run imageless workflow on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), ) #", "..workflow_utilities import getWorkingSet from ..models.workingSet import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that generates", "License, 2.0 (apache.org/licenses/LICENSE-2.0) # See accompanying Copyright.txt and LICENSE files for details ###############################################################################", "\"Models folder has not been created and populated\" ) # Download models folder", "gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get point cloud working", "outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir) # Create config file config_file, config_file_path = tempfile.mkstemp(suffix=\".ini\") configFileVolume", "from ..models.workingSet import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that generates a point cloud.", "aoi_section = ( \"[aoi]\\n\" + f\"name = {baseWorkingSet['name'].replace(' ', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") #", "( \"[roof]\\n\" + f\"model_dir = {models_folder}/Columbia Geon Segmentation Model\\n\" + \"model_prefix = dayton_geon\"", "Ground sample distancy of output imagery in meters per pixel # Default is", "createDockerRunArguments, createGirderClient, ) from ..constants import DanesfieldStep, DockerImage from ..workflow_step import DanesfieldWorkflowStep from", "\"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ), # Upload results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id, 
),", "models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get single file, there will only be one point_cloud_path", ") in_config_file.write(f\"{aoi_section}\\n\") # Ground sample distancy of output imagery in meters per pixel", "in_config_file.write(f\"{paths_section}\\n\") # Set name prefix for output files aoi_section = ( \"[aoi]\\n\" +", "the following options: - aoiBBox (required) \"\"\" def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def", "point cloud working set pointCloudWorkingSet: Dict = getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection =", "volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run imageless workflow on", "# Download models folder models_folder = tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder)", "point_cloud_path) # Create output dir outputDir = tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir) #", "empty dir so no errors are generated + f\"rpc_dir = {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\")", "from danesfield_server.algorithms.common import ( addJobInfo, createDockerRunArguments, createGirderClient, ) from ..constants import DanesfieldStep, DockerImage", "modelsFolder = Folder().findOne( { \"parentId\": core3dCollection[\"_id\"], \"name\": \"models\", } ) if modelsFolder is", "permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ), # Upload results GirderUploadVolumePathToFolder(", "params_section = \"[params]\\n\" + \"gsd = 0.25\\n\" 
in_config_file.write(f\"{params_section}\\n\") # Parameters for the roof", "pixel # Default is 0.25 params_section = \"[params]\\n\" + \"gsd = 0.25\\n\" in_config_file.write(f\"{params_section}\\n\")", "capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run imageless", "from girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms import BindMountVolume, VolumePath from danesfield_server.algorithms.common", "VolumePath from danesfield_server.algorithms.common import ( addJobInfo, createDockerRunArguments, createGirderClient, ) from ..constants import DanesfieldStep,", "\"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks = [ # - Fix output folder permissions", ") # Download models folder models_folder = tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"],", "2.0 (apache.org/licenses/LICENSE-2.0) # See accompanying Copyright.txt and LICENSE files for details ############################################################################### import", "public=True, reuseExisting=True, ) modelsFolder = Folder().findOne( { \"parentId\": core3dCollection[\"_id\"], \"name\": \"models\", } )", "step roof_section = ( \"[roof]\\n\" + f\"model_dir = {models_folder}/Columbia Geon Segmentation Model\\n\" +", "GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms import BindMountVolume, VolumePath from danesfield_server.algorithms.common import ( addJobInfo, createDockerRunArguments,", "output dir outputDir = tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir) # Create config file", "resultHooks=resultHooks, ), ) # Add info for job event listeners job 
= asyncResult.job", "if existing_folder_id is None: output_folder = Folder().createFolder( parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, )", "os import tempfile from typing import Dict from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from danesfield_server.workflow", "{ \"parentId\": core3dCollection[\"_id\"], \"name\": \"models\", } ) if modelsFolder is None: raise DanesfieldWorkflowException(", "getWorkingSet from ..models.workingSet import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that generates a point", "danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from danesfield_server.workflow import DanesfieldWorkflowException from docker.types import DeviceRequest from girder.models.collection", "baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs = [ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks =", "super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo, outputFolder): gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict =", "from ..workflow_utilities import getWorkingSet from ..models.workingSet import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that", "raise DanesfieldWorkflowException( \"Models folder has not been created and populated\" ) # Download", "reuseExisting=True, ) existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] = output_folder[\"_id\"] WorkingSet().save(baseWorkingSet) containerArgs = [ \"python\",", "= Folder().createFolder( parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id = output_folder[\"_id\"] baseWorkingSet[\"output_folder_id\"] 
=", "\"[roof]\\n\" + f\"model_dir = {models_folder}/Columbia Geon Segmentation Model\\n\" + \"model_prefix = dayton_geon\" )", "Folder().findOne( { \"parentId\": core3dCollection[\"_id\"], \"name\": \"models\", } ) if modelsFolder is None: raise", "been created and populated\" ) # Download models folder models_folder = tempfile.mkdtemp() modelsFolderVolume", "tempfile from typing import Dict from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from danesfield_server.workflow import DanesfieldWorkflowException", "BindMountVolume(config_file_path, config_file_path) with open(config_file, \"w\") as in_config_file: # Configure paths paths_section = (", "python # -*- coding: utf-8 -*- ############################################################################### # Copyright Kitware Inc. and Contributors", "import DanesfieldWorkflowStep from ..workflow_utilities import getWorkingSet from ..models.workingSet import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\"", "creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True, ) modelsFolder = Folder().findOne( { \"parentId\": core3dCollection[\"_id\"], \"name\": \"models\",", "models folder models_folder = tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get", "modelsFolderVolume = BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get single file, there will only", "= {baseWorkingSet['name'].replace(' ', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") # Ground sample distancy of output imagery", "\"gsd = 0.25\\n\" in_config_file.write(f\"{params_section}\\n\") # Parameters for the roof geon extraction step roof_section", "for details ############################################################################### import os import tempfile from typing import Dict from 
danesfield_server.algorithms.generate_point_cloud", ") from ..constants import DanesfieldStep, DockerImage from ..workflow_step import DanesfieldWorkflowStep from ..workflow_utilities import", ") if modelsFolder is None: raise DanesfieldWorkflowException( \"Models folder has not been created", "dir outputDir = tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir) # Create config file config_file,", "girder_worker.docker.transforms import BindMountVolume, VolumePath from danesfield_server.algorithms.common import ( addJobInfo, createDockerRunArguments, createGirderClient, ) from", "See accompanying Copyright.txt and LICENSE files for details ############################################################################### import os import tempfile", "with open(config_file, \"w\") as in_config_file: # Configure paths paths_section = ( \"[paths]\\n\" +", "geon extraction step roof_section = ( \"[roof]\\n\" + f\"model_dir = {models_folder}/Columbia Geon Segmentation", "import ResultRunDockerCommand from danesfield_server.workflow import DanesfieldWorkflowException from docker.types import DeviceRequest from girder.models.collection import", "from danesfield_server.workflow import DanesfieldWorkflowException from docker.types import DeviceRequest from girder.models.collection import Collection from", "DanesfieldWorkflowException from docker.types import DeviceRequest from girder.models.collection import Collection from girder.models.folder import Folder", "Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get point cloud working set pointCloudWorkingSet: Dict =", "= self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path) # Create output dir outputDir", "# Copyright Kitware Inc. 
and Contributors # Distributed under the Apache License, 2.0", "Fix output folder permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ), #", "DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection = Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True, ) modelsFolder", "modelsFolder is None: raise DanesfieldWorkflowException( \"Models folder has not been created and populated\"", ") from girder_worker.docker.transforms import BindMountVolume, VolumePath from danesfield_server.algorithms.common import ( addJobInfo, createDockerRunArguments, createGirderClient,", "), # Upload results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id, ), ] asyncResult = docker_run.delay(", "config_file_path, ] resultHooks = [ # - Fix output folder permissions ResultRunDockerCommand( DockerImage.DANESFIELD,", "for output files aoi_section = ( \"[aoi]\\n\" + f\"name = {baseWorkingSet['name'].replace(' ', '_')}\"", "job = asyncResult.job job = addJobInfo( job, jobId=jobInfo.jobId, stepName=self.name, workingSetId=baseWorkingSet[\"_id\"], ) return job", "docker_run from girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms import BindMountVolume, VolumePath from", "models_folder) # Get single file, there will only be one point_cloud_path = tempfile.mktemp(suffix=\".las\")", "not been created and populated\" ) # Download models folder models_folder = tempfile.mkdtemp()", "is 0.25 params_section = \"[params]\\n\" + \"gsd = 0.25\\n\" in_config_file.write(f\"{params_section}\\n\") # Parameters for", "job event listeners job = asyncResult.job job = addJobInfo( job, jobId=jobInfo.jobId, stepName=self.name, workingSetId=baseWorkingSet[\"_id\"],", "# Configure paths paths_section = ( 
\"[paths]\\n\" + f\"p3d_fpath = {point_cloud_path}\\n\" + f\"work_dir", "girder.models.collection import Collection from girder.models.folder import Folder from girder.models.user import User from girder_worker.docker.tasks", "createGirderClient, ) from ..constants import DanesfieldStep, DockerImage from ..workflow_step import DanesfieldWorkflowStep from ..workflow_utilities", "{baseWorkingSet['name'].replace(' ', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") # Ground sample distancy of output imagery in", "as in_config_file: # Configure paths paths_section = ( \"[paths]\\n\" + f\"p3d_fpath = {point_cloud_path}\\n\"", "..workflow_step import DanesfieldWorkflowStep from ..workflow_utilities import getWorkingSet from ..models.workingSet import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep):", "jobInfo ) core3dCollection = Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True, ) modelsFolder =", "# See accompanying Copyright.txt and LICENSE files for details ############################################################################### import os import", "{point_cloud_path}\\n\" + f\"work_dir = {outputDir}\\n\" # Supply empty dir so no errors are", "set pointCloudWorkingSet: Dict = getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection = Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(),", "= getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection = Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True,", "= getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get point cloud working set pointCloudWorkingSet: Dict = getWorkingSet(", "\"models\", } ) if modelsFolder is None: raise DanesfieldWorkflowException( \"Models folder has not", "import DanesfieldStep, DockerImage from ..workflow_step import 
DanesfieldWorkflowStep from ..workflow_utilities import getWorkingSet from ..models.workingSet", "createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get point cloud working set pointCloudWorkingSet:", "} ) if modelsFolder is None: raise DanesfieldWorkflowException( \"Models folder has not been", "], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run imageless workflow on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ),", "# Add info for job event listeners job = asyncResult.job job = addJobInfo(", "] resultHooks = [ # - Fix output folder permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\",", "for job event listeners job = asyncResult.job job = addJobInfo( job, jobId=jobInfo.jobId, stepName=self.name,", "details ############################################################################### import os import tempfile from typing import Dict from danesfield_server.algorithms.generate_point_cloud import", "DeviceRequest from girder.models.collection import Collection from girder.models.folder import Folder from girder.models.user import User", "tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path, config_file_path) with open(config_file, \"w\") as in_config_file: # Configure paths", "\"[aoi]\\n\" + f\"name = {baseWorkingSet['name'].replace(' ', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") # Ground sample distancy", "LICENSE files for details ############################################################################### import os import tempfile from typing import Dict", "] asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume, ],", "resultHooks = [ # - Fix output folder 
permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\",", "= \"[params]\\n\" + \"gsd = 0.25\\n\" in_config_file.write(f\"{params_section}\\n\") # Parameters for the roof geon", "( addJobInfo, createDockerRunArguments, createGirderClient, ) from ..constants import DanesfieldStep, DockerImage from ..workflow_step import", "dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") # Ensure folder exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is", "f\"rpc_dir = {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") # Set name prefix for output files aoi_section", "run(self, jobInfo, outputFolder): gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get", "from girder.models.user import User from girder_worker.docker.tasks import docker_run from girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder,", "-*- ############################################################################### # Copyright Kitware Inc. and Contributors # Distributed under the Apache", "from ..constants import DanesfieldStep, DockerImage from ..workflow_step import DanesfieldWorkflowStep from ..workflow_utilities import getWorkingSet", "Inc. 
and Contributors # Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0) # See", "per pixel # Default is 0.25 params_section = \"[params]\\n\" + \"gsd = 0.25\\n\"", "[ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks = [ # - Fix output folder", "# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0) # See accompanying Copyright.txt and", "info for job event listeners job = asyncResult.job job = addJobInfo( job, jobId=jobInfo.jobId,", "(apache.org/licenses/LICENSE-2.0) # See accompanying Copyright.txt and LICENSE files for details ############################################################################### import os", "\"\"\" def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo, outputFolder): gc = createGirderClient(jobInfo.requestInfo)", "modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run imageless workflow on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks,", "\"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks = [ # - Fix output folder permissions ResultRunDockerCommand(", "import Collection from girder.models.folder import Folder from girder.models.user import User from girder_worker.docker.tasks import", "= tempfile.mkstemp(suffix=\".ini\") configFileVolume = BindMountVolume(config_file_path, config_file_path) with open(config_file, \"w\") as in_config_file: # Configure", "volumes=outputDirVolume._repr_json_(), ), # Upload results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id, ), ] asyncResult =", "core3dCollection = Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True, ) modelsFolder = 
Folder().findOne( {", "\"w\") as in_config_file: # Configure paths paths_section = ( \"[paths]\\n\" + f\"p3d_fpath =", "f\"p3d_fpath = {point_cloud_path}\\n\" + f\"work_dir = {outputDir}\\n\" # Supply empty dir so no", "'_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") # Ground sample distancy of output imagery in meters per", "listeners job = asyncResult.job job = addJobInfo( job, jobId=jobInfo.jobId, stepName=self.name, workingSetId=baseWorkingSet[\"_id\"], ) return", "= 0.25\\n\" in_config_file.write(f\"{params_section}\\n\") # Parameters for the roof geon extraction step roof_section =", "tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path) # Create output", "import getWorkingSet from ..models.workingSet import WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that generates a", "configFileVolume = BindMountVolume(config_file_path, config_file_path) with open(config_file, \"w\") as in_config_file: # Configure paths paths_section", "from girder_worker.docker.transforms import BindMountVolume, VolumePath from danesfield_server.algorithms.common import ( addJobInfo, createDockerRunArguments, createGirderClient, )", "= [ \"python\", \"/danesfield/tools/run_danesfield.py\", config_file_path, ] resultHooks = [ # - Fix output", "tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir) # Create config file config_file, config_file_path = tempfile.mkstemp(suffix=\".ini\")", "exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if existing_folder_id is None: output_folder = Folder().createFolder( parent=core3dCollection, parentType=\"collection\",", "ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ), 
# Upload results GirderUploadVolumePathToFolder( VolumePath(\".\",", "# -*- coding: utf-8 -*- ############################################################################### # Copyright Kitware Inc. and Contributors #", "############################################################################### # Copyright Kitware Inc. and Contributors # Distributed under the Apache License,", "existing_folder_id, ), ] asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume,", "gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get single file, there will only be one point_cloud_path =", "Segmentation Model\\n\" + \"model_prefix = dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") # Ensure folder exists existing_folder_id", "reuseExisting=True, ) modelsFolder = Folder().findOne( { \"parentId\": core3dCollection[\"_id\"], \"name\": \"models\", } ) if", "import Dict from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand from danesfield_server.workflow import DanesfieldWorkflowException from docker.types import", "imagery in meters per pixel # Default is 0.25 params_section = \"[params]\\n\" +", "pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path) # Create output dir outputDir = tempfile.mkdtemp() outputDirVolume =", "single file, there will only be one point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0]", "girder.models.user import User from girder_worker.docker.tasks import docker_run from girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder, )", "# Upload results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id, ), ] asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1,", "if modelsFolder is None: raise 
DanesfieldWorkflowException( \"Models folder has not been created and", "models_folder = tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get single file,", "is None: output_folder = Folder().createFolder( parent=core3dCollection, parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id =", "folder permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ), # Upload results", "Default is 0.25 params_section = \"[params]\\n\" + \"gsd = 0.25\\n\" in_config_file.write(f\"{params_section}\\n\") # Parameters", "= createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get point cloud working set", "( GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms import BindMountVolume, VolumePath from danesfield_server.algorithms.common import ( addJobInfo,", "# Parameters for the roof geon extraction step roof_section = ( \"[roof]\\n\" +", "baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo) # Get point cloud working set pointCloudWorkingSet: Dict", "= dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") # Ensure folder exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\") if existing_folder_id", "open(config_file, \"w\") as in_config_file: # Configure paths paths_section = ( \"[paths]\\n\" + f\"p3d_fpath", "core3dCollection[\"_id\"], \"name\": \"models\", } ) if modelsFolder is None: raise DanesfieldWorkflowException( \"Models folder", "-*- coding: utf-8 -*- ############################################################################### # Copyright Kitware Inc. 
and Contributors # Distributed", "Parameters for the roof geon extraction step roof_section = ( \"[roof]\\n\" + f\"model_dir", "workflow on [{baseWorkingSet['name']}]\", jobType=self.name, user=jobInfo.requestInfo.user, resultHooks=resultHooks, ), ) # Add info for job", "- Fix output folder permissions ResultRunDockerCommand( DockerImage.DANESFIELD, command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ),", "= BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get single file, there will only be", "command=[\"chown\", \"-R\", f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ), # Upload results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id,", "Get single file, there will only be one point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile =", "Upload results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id, ), ] asyncResult = docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])],", "= {point_cloud_path}\\n\" + f\"work_dir = {outputDir}\\n\" # Supply empty dir so no errors", "shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\", containerArgs=containerArgs, jobTitle=f\"Run imageless workflow", "following options: - aoiBBox (required) \"\"\" def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self,", "+ \"model_prefix = dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") # Ensure folder exists existing_folder_id = baseWorkingSet.get(\"output_folder_id\")", "= docker_run.delay( device_requests=[DeviceRequest(count=-1, capabilities=[[\"gpu\"]])], 
shm_size=\"8G\", volumes=[ pointCloudFileVolume, configFileVolume, outputDirVolume, modelsFolderVolume, ], **createDockerRunArguments( image=f\"{DockerImage.DANESFIELD}:latest\",", "f\"model_dir = {models_folder}/Columbia Geon Segmentation Model\\n\" + \"model_prefix = dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\") #", "from girder.models.folder import Folder from girder.models.user import User from girder_worker.docker.tasks import docker_run from", "import BindMountVolume, VolumePath from danesfield_server.algorithms.common import ( addJobInfo, createDockerRunArguments, createGirderClient, ) from ..constants", "# Supply empty dir so no errors are generated + f\"rpc_dir = {tempfile.mkdtemp()}\\n\"", "Get point cloud working set pointCloudWorkingSet: Dict = getWorkingSet( DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo ) core3dCollection", "User from girder_worker.docker.tasks import docker_run from girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms", "= tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir) # Create config file config_file, config_file_path =", "one point_cloud_path = tempfile.mktemp(suffix=\".las\") pointCloudFile = self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path)", "danesfield_server.algorithms.common import ( addJobInfo, createDockerRunArguments, createGirderClient, ) from ..constants import DanesfieldStep, DockerImage from", "from girder_worker.docker.tasks import docker_run from girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms import", "prefix for output files aoi_section = ( \"[aoi]\\n\" + f\"name = {baseWorkingSet['name'].replace(' ',", "None: output_folder = Folder().createFolder( parent=core3dCollection, 
parentType=\"collection\", name=f\"(Imageless) {baseWorkingSet['name']}\", reuseExisting=True, ) existing_folder_id = output_folder[\"_id\"]", "cloud. Supports the following options: - aoiBBox (required) \"\"\" def __init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\")", "Set name prefix for output files aoi_section = ( \"[aoi]\\n\" + f\"name =", "+ f\"name = {baseWorkingSet['name'].replace(' ', '_')}\" ) in_config_file.write(f\"{aoi_section}\\n\") # Ground sample distancy of", "generated + f\"rpc_dir = {tempfile.mkdtemp()}\\n\" ) in_config_file.write(f\"{paths_section}\\n\") # Set name prefix for output", "roof geon extraction step roof_section = ( \"[roof]\\n\" + f\"model_dir = {models_folder}/Columbia Geon", "in meters per pixel # Default is 0.25 params_section = \"[params]\\n\" + \"gsd", "utf-8 -*- ############################################################################### # Copyright Kitware Inc. and Contributors # Distributed under the", "girder_worker.docker.transforms.girder import ( GirderUploadVolumePathToFolder, ) from girder_worker.docker.transforms import BindMountVolume, VolumePath from danesfield_server.algorithms.common import", "= Collection().createCollection( name=\"core3d\", creator=User().getAdmins().next(), description=\"\", public=True, reuseExisting=True, ) modelsFolder = Folder().findOne( { \"parentId\":", "0.25\\n\" in_config_file.write(f\"{params_section}\\n\") # Parameters for the roof geon extraction step roof_section = (", "folder models_folder = tempfile.mkdtemp() modelsFolderVolume = BindMountVolume(models_folder, models_folder) gc.downloadFolderRecursive(modelsFolder[\"_id\"], models_folder) # Get single", "= Folder().findOne( { \"parentId\": core3dCollection[\"_id\"], \"name\": \"models\", } ) if modelsFolder is None:", "BindMountVolume(point_cloud_path, point_cloud_path) # Create output dir outputDir = tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir)", 
"distancy of output imagery in meters per pixel # Default is 0.25 params_section", "that generates a point cloud. Supports the following options: - aoiBBox (required) \"\"\"", "coding: utf-8 -*- ############################################################################### # Copyright Kitware Inc. and Contributors # Distributed under", "self.getFiles(pointCloudWorkingSet)[0] gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path) # Create output dir outputDir =", "__init__(self): super(RunDanesfieldImageless, self).__init__(\"Imageless\") self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD) def run(self, jobInfo, outputFolder): gc = createGirderClient(jobInfo.requestInfo) baseWorkingSet: Dict", "gc.downloadFile(str(pointCloudFile[\"_id\"]), point_cloud_path) pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path) # Create output dir outputDir = tempfile.mkdtemp()", "= BindMountVolume(point_cloud_path, point_cloud_path) # Create output dir outputDir = tempfile.mkdtemp() outputDirVolume = BindMountVolume(host_path=outputDir,", "docker.types import DeviceRequest from girder.models.collection import Collection from girder.models.folder import Folder from girder.models.user", ") modelsFolder = Folder().findOne( { \"parentId\": core3dCollection[\"_id\"], \"name\": \"models\", } ) if modelsFolder", "folder has not been created and populated\" ) # Download models folder models_folder", "WorkingSet class RunDanesfieldImageless(DanesfieldWorkflowStep): \"\"\" Step that generates a point cloud. Supports the following", "a point cloud. 
Supports the following options: - aoiBBox (required) \"\"\" def __init__(self):", "\"parentId\": core3dCollection[\"_id\"], \"name\": \"models\", } ) if modelsFolder is None: raise DanesfieldWorkflowException( \"Models", "f\"{os.getuid()}:{os.getgid()}\", outputDir], volumes=outputDirVolume._repr_json_(), ), # Upload results GirderUploadVolumePathToFolder( VolumePath(\".\", volume=outputDirVolume), existing_folder_id, ), ]", "from docker.types import DeviceRequest from girder.models.collection import Collection from girder.models.folder import Folder from", "+ f\"model_dir = {models_folder}/Columbia Geon Segmentation Model\\n\" + \"model_prefix = dayton_geon\" ) in_config_file.write(f\"{roof_section}\\n\")" ]
[ "models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api_v1.group')), ], options={", "07:05 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True", "primary_key=True, serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('email', models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254,", "True dependencies = [ ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True,", "models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api_v1.group')), ], options={ 'db_table': 'user',", "<gh_stars>1-10 # Generated by Django 3.1.1 on 2020-09-23 07:05 from django.db import migrations,", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('email', models.EmailField(max_length=254, unique=True)), ('name',", "], options={ 'db_table': 'group', }, ), migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "= [ ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "}, ), migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True,", 
"models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ], options={ 'db_table': 'group', },", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [", "import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations =", "models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations", "Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Group',", "Generated by Django 3.1.1 on 2020-09-23 07:05 from django.db import migrations, models import", "('permission_page', models.TextField(null=True)), ], options={ 'db_table': 'group', }, ), migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True,", "models.TextField(null=True)), ], options={ 'db_table': 'group', }, ), migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "# Generated by Django 3.1.1 on 2020-09-23 07:05 from django.db import migrations, models", "initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Group', fields=[", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ], options={ 'db_table':", "2020-09-23 07:05 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =", "models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active', 
models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)),", "migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ],", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('email', models.EmailField(max_length=254,", "3.1.1 on 2020-09-23 07:05 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)),", "[ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)),", "models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT,", "= [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page',", "'db_table': 'group', }, ), migrations.CreateModel( name='User', fields=[ ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_login',", "= True dependencies = [ ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id',", "[ ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "on 2020-09-23 07:05 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial", "('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('email', models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)),", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies =", "('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api_v1.group')), ],", "('is_admin', models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api_v1.group')), ], options={ 'db_table':", "name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('email',", "class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel(", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ]", "name='Group', fields=[ 
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ], options={", "verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ], options={ 'db_table': 'group', }, ), migrations.CreateModel( name='User',", "('is_active', models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api_v1.group')), ], options={ 'db_table': 'user', },", "('name', models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True,", "migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),", "serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ], options={ 'db_table': 'group', }, ), migrations.CreateModel(", "primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ], options={ 'db_table': 'group', }, ),", "verbose_name='last login')), ('email', models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active',", "('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api_v1.group')), ], options={ 
'db_table': 'user', }, ), ]", "models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('email', models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)), ('is_admin',", "by Django 3.1.1 on 2020-09-23 07:05 from django.db import migrations, models import django.db.models.deletion", "'group', }, ), migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True,", "null=True)), ('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api_v1.group')),", "django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [", "dependencies = [ ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "unique=True)), ('name', models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group',", "('email', models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)), ('date_joined',", "), migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last", "from django.db import 
migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies", "serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('email', models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254, null=True)),", "null=True, verbose_name='last login')), ('email', models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ], options={ 'db_table': 'group',", "login')), ('email', models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254, null=True)), ('password', models.CharField(max_length=128)), ('is_admin', models.BooleanField(default=False)), ('is_active', models.BooleanField(default=True)),", "('name', models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ], options={ 'db_table': 'group', }, ), migrations.CreateModel( name='User', fields=[", "models.CharField(max_length=254)), ('permission_page', models.TextField(null=True)), ], options={ 'db_table': 'group', }, ), migrations.CreateModel( name='User', fields=[ ('id',", "Django 3.1.1 on 2020-09-23 07:05 from django.db import migrations, models import django.db.models.deletion class", "verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('email', models.EmailField(max_length=254, unique=True)), ('name', models.CharField(max_length=254, null=True)), ('password',", "models.BooleanField(default=True)), ('date_joined', models.DateField(auto_now=True)), ('group', models.ForeignKey(null=True, 
on_delete=django.db.models.deletion.PROTECT, to='api_v1.group')), ], options={ 'db_table': 'user', }, ),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('email', models.EmailField(max_length=254, unique=True)),", "options={ 'db_table': 'group', }, ), migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'))," ]
[ "core.views.services import ServiceGridView #from core.views.analytics import AnalyticsAjaxView from core.models import * from rest_framework", "request.path) return HttpResponseRedirect(apache_url) urlpatterns = patterns('', # Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer',", "# url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding in rest_framework urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')),", "Uncomment the next two lines to enable the admin: from django.contrib import admin", "machine \"\"\" apache_url = \"http://%s%s\" % (request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url) urlpatterns = patterns('',", "SitePlus from django.http import HttpResponseRedirect #from core.xoslib import XOSLibDataView admin.site = SitePlus() admin.autodiscover()", "rest_framework urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), # XOSLib rest methods url(r'^xoslib/', include('core.xoslib.methods', namespace='xoslib')), )", "enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the", "urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), # XOSLib rest methods url(r'^xoslib/', include('core.xoslib.methods', namespace='xoslib')), ) +", "LegacyXMLRPC from core.views.services import ServiceGridView #from core.views.analytics import AnalyticsAjaxView from core.models import *", "url(r'^files/', redirect_to_apache), #Adding in rest_framework urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), # XOSLib rest methods", "admin.site = SitePlus() admin.autodiscover() def redirect_to_apache(request): \"\"\" bounce a request back to the", "next two lines to enable the admin: from django.contrib import admin # This", "import admin # This 
is the generated API from xosapi import * from", "django.http import HttpResponseRedirect #from core.xoslib import XOSLibDataView admin.site = SitePlus() admin.autodiscover() def redirect_to_apache(request):", "XOSLibDataView admin.site = SitePlus() admin.autodiscover() def redirect_to_apache(request): \"\"\" bounce a request back to", "import ServiceGridView #from core.views.analytics import AnalyticsAjaxView from core.models import * from rest_framework import", "'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment the", "API from xosapi import * from core.views.legacyapi import LegacyXMLRPC from core.views.services import ServiceGridView", "next line to enable the admin: url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), #", "url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment the admin/doc line", "include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: url(r'^admin/', include(admin.site.urls)), url(r'^',", "#from core.xoslib import XOSLibDataView admin.site = SitePlus() admin.autodiscover() def redirect_to_apache(request): \"\"\" bounce a", "from core.views.legacyapi import LegacyXMLRPC from core.views.services import ServiceGridView #from core.views.analytics import AnalyticsAjaxView from", "in rest_framework urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), # XOSLib rest methods url(r'^xoslib/', include('core.xoslib.methods', namespace='xoslib')),", "import patterns, include, url # Uncomment the next two lines to enable the", "ServiceGridView.as_view(), name='serviceGrid'), 
url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment the admin/doc line below to enable admin", "core.xoslib import XOSLibDataView admin.site = SitePlus() admin.autodiscover() def redirect_to_apache(request): \"\"\" bounce a request", "to the apache server that is running on the machine \"\"\" apache_url =", "# Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')),", "HttpResponseRedirect(apache_url) urlpatterns = patterns('', # Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid',", "return HttpResponseRedirect(apache_url) urlpatterns = patterns('', # Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'),", "ServiceGridView #from core.views.analytics import AnalyticsAjaxView from core.models import * from rest_framework import generics", "import XOSLibDataView admin.site = SitePlus() admin.autodiscover() def redirect_to_apache(request): \"\"\" bounce a request back", "#Adding in rest_framework urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), # XOSLib rest methods url(r'^xoslib/', include('core.xoslib.methods',", "url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment the admin/doc line below to enable", "admin # This is the generated API from xosapi import * from core.views.legacyapi", "Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')), #", "enable the 
admin: url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"),", "* from core.views.legacyapi import LegacyXMLRPC from core.views.services import ServiceGridView #from core.views.analytics import AnalyticsAjaxView", "redirect_to_apache), #Adding in rest_framework urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), # XOSLib rest methods url(r'^xoslib/',", "line to enable the admin: url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$',", "redirect_to_apache(request): \"\"\" bounce a request back to the apache server that is running", "Uncomment the next line to enable the admin: url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home',", "request back to the apache server that is running on the machine \"\"\"", "the next line to enable the admin: url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'),", "a request back to the apache server that is running on the machine", "include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(),", "url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding", "(request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url) urlpatterns = patterns('', 
# Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer',", "= \"http://%s%s\" % (request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url) urlpatterns = patterns('', # Examples: url(r'^stats',", "from core.dashboard.sites import SitePlus from django.http import HttpResponseRedirect #from core.xoslib import XOSLibDataView admin.site", "server that is running on the machine \"\"\" apache_url = \"http://%s%s\" % (request.META['HOSTNAME'],", "import * from core.views.legacyapi import LegacyXMLRPC from core.views.services import ServiceGridView #from core.views.analytics import", "url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment the admin/doc line below to enable admin documentation: url(r'^admin/doc/',", "include('rest_framework_swagger.urls')), # Uncomment the admin/doc line below to enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')),", "from django.conf.urls import patterns, include, url # Uncomment the next two lines to", "from xosapi import * from core.views.legacyapi import LegacyXMLRPC from core.views.services import ServiceGridView #from", "\"http://%s%s\" % (request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url) urlpatterns = patterns('', # Examples: url(r'^stats', 'core.views.stats.Stats',", "core.views.legacyapi import LegacyXMLRPC from core.views.services import ServiceGridView #from core.views.analytics import AnalyticsAjaxView from core.models", "url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment", "the machine \"\"\" apache_url = \"http://%s%s\" % (request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url) urlpatterns =", "two lines to enable the admin: from django.contrib import admin # This is", 
"the next two lines to enable the admin: from django.contrib import admin #", "from django.http import HttpResponseRedirect #from core.xoslib import XOSLibDataView admin.site = SitePlus() admin.autodiscover() def", "core.views.analytics import AnalyticsAjaxView from core.models import * from rest_framework import generics from core.dashboard.sites", "name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding in rest_framework urls url(r'^xos/', include('rest_framework.urls',", "patterns, include, url # Uncomment the next two lines to enable the admin:", "from core.views.services import ServiceGridView #from core.views.analytics import AnalyticsAjaxView from core.models import * from", "the admin/doc line below to enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the", "# url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache),", "back to the apache server that is running on the machine \"\"\" apache_url", "that is running on the machine \"\"\" apache_url = \"http://%s%s\" % (request.META['HOSTNAME'], request.path)", "= SitePlus() admin.autodiscover() def redirect_to_apache(request): \"\"\" bounce a request back to the apache", "\"\"\" apache_url = \"http://%s%s\" % (request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url) urlpatterns = patterns('', #", "import AnalyticsAjaxView from core.models import * from rest_framework import generics from core.dashboard.sites import", "name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding in rest_framework urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), # XOSLib rest", "# Uncomment the next 
two lines to enable the admin: from django.contrib import", "name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment the admin/doc line below to", "url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$',", "to enable the admin: url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(),", "import SitePlus from django.http import HttpResponseRedirect #from core.xoslib import XOSLibDataView admin.site = SitePlus()", "name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment the admin/doc", "bounce a request back to the apache server that is running on the", "= patterns('', # Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'),", "to enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable", "AnalyticsAjaxView from core.models import * from rest_framework import generics from core.dashboard.sites import SitePlus", "def redirect_to_apache(request): \"\"\" bounce a request back to the apache server that is", "to enable the admin: from django.contrib import admin # This is the generated", "url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), # XOSLib rest methods url(r'^xoslib/', include('core.xoslib.methods', 
namespace='xoslib')), ) + get_REST_patterns()", "name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding in rest_framework", "lines to enable the admin: from django.contrib import admin # This is the", "HttpResponseRedirect #from core.xoslib import XOSLibDataView admin.site = SitePlus() admin.autodiscover() def redirect_to_apache(request): \"\"\" bounce", "# Uncomment the admin/doc line below to enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), #", "name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment the admin/doc line below to enable admin documentation:", "admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin:", "url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'),", "running on the machine \"\"\" apache_url = \"http://%s%s\" % (request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url)", "from django.contrib import admin # This is the generated API from xosapi import", "patterns('', # Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/',", "import * from rest_framework import generics from core.dashboard.sites import SitePlus from django.http import", "url # Uncomment the next two lines to enable the admin: from django.contrib", "* from rest_framework import generics from core.dashboard.sites import SitePlus from django.http 
import HttpResponseRedirect", "url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: url(r'^admin/', include(admin.site.urls)),", "import generics from core.dashboard.sites import SitePlus from django.http import HttpResponseRedirect #from core.xoslib import", "django.contrib import admin # This is the generated API from xosapi import *", "apache server that is running on the machine \"\"\" apache_url = \"http://%s%s\" %", "apache_url = \"http://%s%s\" % (request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url) urlpatterns = patterns('', # Examples:", "'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/',", "include, url # Uncomment the next two lines to enable the admin: from", "xosapi import * from core.views.legacyapi import LegacyXMLRPC from core.views.services import ServiceGridView #from core.views.analytics", "admin: url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC',", "the apache server that is running on the machine \"\"\" apache_url = \"http://%s%s\"", "SitePlus() admin.autodiscover() def redirect_to_apache(request): \"\"\" bounce a request back to the apache server", "# Uncomment the next line to enable the admin: url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)),", "on the machine \"\"\" apache_url = \"http://%s%s\" % (request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url) urlpatterns", "Uncomment the admin/doc line below to enable admin documentation: url(r'^admin/doc/', 
include('django.contrib.admindocs.urls')), # Uncomment", "urlpatterns = patterns('', # Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'), url(r'^observer', 'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(),", "'core.views.observer.Observer', name='observer'), url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'), url(r'^docs/', include('rest_framework_swagger.urls')), # Uncomment the admin/doc line below", "url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding in rest_framework urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), #", "from core.models import * from rest_framework import generics from core.dashboard.sites import SitePlus from", "#from core.views.analytics import AnalyticsAjaxView from core.models import * from rest_framework import generics from", "url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding in rest_framework urls", "AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding in rest_framework urls url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')), # XOSLib", "% (request.META['HOSTNAME'], request.path) return HttpResponseRedirect(apache_url) urlpatterns = patterns('', # Examples: url(r'^stats', 'core.views.stats.Stats', name='stats'),", "include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), #", "core.models import * from rest_framework import generics from core.dashboard.sites import SitePlus from django.http", "\"\"\" bounce a 
request back to the apache server that is running on", "This is the generated API from xosapi import * from core.views.legacyapi import LegacyXMLRPC", "core.dashboard.sites import SitePlus from django.http import HttpResponseRedirect #from core.xoslib import XOSLibDataView admin.site =", "enable the admin: from django.contrib import admin # This is the generated API", "documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: url(r'^admin/',", "is the generated API from xosapi import * from core.views.legacyapi import LegacyXMLRPC from", "admin.autodiscover() def redirect_to_apache(request): \"\"\" bounce a request back to the apache server that", "'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding in rest_framework urls url(r'^xos/',", "the admin: from django.contrib import admin # This is the generated API from", "admin: from django.contrib import admin # This is the generated API from xosapi", "the generated API from xosapi import * from core.views.legacyapi import LegacyXMLRPC from core.views.services", "line below to enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line", "rest_framework import generics from core.dashboard.sites import SitePlus from django.http import HttpResponseRedirect #from core.xoslib", "XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"), url(r'^files/', redirect_to_apache), #Adding in", "below to enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to", "generics from core.dashboard.sites import SitePlus from django.http import HttpResponseRedirect #from 
core.xoslib import XOSLibDataView", "is running on the machine \"\"\" apache_url = \"http://%s%s\" % (request.META['HOSTNAME'], request.path) return", "#url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'), # url(r'^analytics/(?P<name>\\w+)/$', AnalyticsAjaxView.as_view(), name=\"analytics\"),", "admin/doc line below to enable admin documentation: url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next", "django.conf.urls import patterns, include, url # Uncomment the next two lines to enable", "# This is the generated API from xosapi import * from core.views.legacyapi import", "from rest_framework import generics from core.dashboard.sites import SitePlus from django.http import HttpResponseRedirect #from", "the admin: url(r'^admin/', include(admin.site.urls)), url(r'^', include(admin.site.urls)), #url(r'^profile/home', 'core.views.home'), # url(r'^admin/xoslib/(?P<name>\\w+)/$', XOSLibDataView.as_view(), name=\"xoslib\"), url(r'^xmlrpc/legacyapi/$',", "import HttpResponseRedirect #from core.xoslib import XOSLibDataView admin.site = SitePlus() admin.autodiscover() def redirect_to_apache(request): \"\"\"", "generated API from xosapi import * from core.views.legacyapi import LegacyXMLRPC from core.views.services import", "import LegacyXMLRPC from core.views.services import ServiceGridView #from core.views.analytics import AnalyticsAjaxView from core.models import" ]
[ "= relation_emb[relation2id[rel]] if tail not in entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]] return entity2emb, relation2emb", "= None return triples, core_entity def search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = [] cur_head,", "[] part_token_nums = int(len(all_token_set) / PROCESSES) for i in range(PROCESSES): if i !=", "'rb') as f_in: # token2datas = pickle.load(f_in) logger.info(\"save retrieved entity and relation embeddings...\")", "= token_triples token2data[\"core_entity\"] = core_entity token2datas[token] = token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as", "removed. \"\"\" if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"): s = s[:-2]", "pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all results done!\") logger.info('{} / {} tokens retrieved at lease", ": 2020/04/07 16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples", "in nodes: nodes.append(head) if tail not in nodes: nodes.append(tail) # add edge edges.append([head,", "train_samples = pickle.load(open(args.train_token, 'rb')) dev_samples = pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading tokenization results.') #", "entity string, if present. :param s: Entity string. 
:return: Entity string with part-of-speech", "str(idx)) # f_r.write('\\n') # id2entity = {v:k for k,v in entity2id.items()} # id2relation", "{v:k for k,v in entity2id.items()} # id2relation = {v:k for k,v in relation2id.items()}", "default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of dev set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path') parser.add_argument('--entity_path',", "args.no_stopwords and token in stopwords: logger.info('{} is stopword, skipped!'.format(token)) # stopword_cnt += 1", "= triple[0], triple[1], triple[2] # if head not in entity2id.keys(): # entity2id[head] =", "f_r.write('\\n') # id2entity = {v:k for k,v in entity2id.items()} # id2relation = {v:k", "parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\") parser.add_argument('--relation_emb_path', type=str,", "relation2id.items()} # return entity2id, id2entity, relation2id, id2relation def get_concept_mapping(entity_path, relation_path): \"\"\"read entity and", "# return entity2id, id2entity, relation2id, id2relation def get_concept_mapping(entity_path, relation_path): \"\"\"read entity and relation", "relation2emb[rel] = relation_emb[relation2id[rel]] if tail not in entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]] return entity2emb,", "token in sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token) logger.info('Finished making tokenization results into token set.')", "s.endswith(\"/v\") or s.endswith(\"/r\"): s = s[:-2] return s def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb,", "all_token_set = set() for sample in train_samples + dev_samples: for token in 
sample['query_tokens']", "edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples) token2data = {} token2data[\"sub_graph\"] = (nodes, edges,", "in core_entitys: if len(entity) < min_len: min_len = len(entity) min_entity = entity core_entity", "'retrived_token_graphs_1hop.data'), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs.') # with", "or entity == \" \": logger.info(\"empty entity: {}\".format(entity)) f.write(entity + \" \" +", "f_r.write(relation + \" \" + str(idx)) # f_r.write('\\n') # id2entity = {v:k for", "# with open(relation_path, 'w') as f_r: # for relation, idx in relation2id.items(): #", "of entities and triples\"\"\" # entity2id = {} # relation2id = {} #", "+ \" \" + str(idx)) # f_r.write('\\n') # id2entity = {v:k for k,v", "graph\"\"\" logger.info(\"begin run function {} at process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas = {} for", "as f: for line in f.readlines(): ls = line.split(\" \") # pass first", "tqdm from nltk.corpus import wordnet as wn from multiprocessing import Pool logging.basicConfig(format =", "random.choice(conceptnet_triples) logger.info(triple) # # build mappings of entities and relations(all ConceptNet) # entity2id,", "triple[2] if head not in entity2emb: entity2emb[head] = entity_emb[entity2id[head]] if rel not in", "triple[2] if token in head.split(\"_\") or token in tail.split(\"_\"): triples.append(triple) # limit retrieved", "path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str,", "+ sample['document_tokens']: all_token_set.add(token) logger.info('Finished making tokenization results into token set.') # load stopwords", "tqdm(token_part): if 
token in set(string.punctuation): logger.info('{} is punctuation, skipped!'.format(token)) # punctuation_cnt += 1", "= s[:-2] return s def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb): \"\"\"retrieve entity and", "train_samples + dev_samples: for token in sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token) logger.info('Finished making tokenization", "retrieving token graphs, combine all result...') token2datas = {} for i in range(PROCESSES):", "parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of train set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file", "all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin to deal with {} processes...\".format(PROCESSES)) p = Pool(PROCESSES) for", "logger.info(\"Finished mapping of relations and entities.\") # get concept mapping logger.info(\"get concept mapping...\")", "= {} relation2emb = {} for token, data in token2datas.items(): graph_triples = data[\"graph_triples\"]", "{} at process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas = {} for token in tqdm(token_part): if", "line in f.readlines(): ls = line.split('\\t') if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\" Some preprocessing:", "= all_token_set[i * part_token_nums: (i+1) * part_token_nums] else: cur_token_set = all_token_set[i * part_token_nums:", "data[\"graph_triples\"] for triple in graph_triples: head, rel, tail = triple[0], triple[1], triple[2] if", "head, rel, tail = triple[0], triple[1], triple[2] if token in head.split(\"_\") or token", "pickled samples logger.info('Begin to load tokenization results...') train_samples = pickle.load(open(args.train_token, 'rb')) dev_samples =", "[] edges = [] edges_attr = [] token_triples = [] for triple 
in", "embeddings...\") with open(args.entity_emb_path, 'rb') as f1: entity_emb = pickle.load(f1) with open(args.relation_emb_path, 'rb') as", "parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\") parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb path\") parser.add_argument('--entity2emb_path',", "triples.append(triple) # limit retrieved knowledge here if len(triples) > limit: break if token", "return entity2emb, relation2emb def main(): parser = argparse.ArgumentParser() parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file", "open(relation_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\" \")", "edges.append([head, tail]) edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple) assert len(edges) == len(edges_attr) return nodes,", "in train_samples + dev_samples: for token in sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token) logger.info('Finished making", "rel, tail = triple[0], triple[1], triple[2] if token in head.split(\"_\") or token in", "\" \" + \" \".join(map(str, emb)) + \"\\n\") logger.info(\"For all KG, {}/{} retrieved", "args.relation_path) # load pickled samples logger.info('Begin to load tokenization results...') train_samples = pickle.load(open(args.train_token,", "token in tail.split(\"_\"): core_entitys.add(tail) # define core entity, choose the shortest core_entitys =", "type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') parser.add_argument('--no_stopwords', action='store_true', default=True, 
help='ignore", "triple[0], triple[1], triple[2] if cur_head == head or cur_head == tail or cur_tail", "# build mappings of entities and relations(all ConceptNet) # entity2id, id2entity, relation2id, id2relation", "parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\") parser.add_argument('--relation_path', type=str,", "len(relation2id) # with open(entity_path, 'w') as f_e: # for entity, idx in entity2id.items():", "entity2id, relation2id, entity_emb, relation_emb): \"\"\"retrieve entity and relation embeddings\"\"\" entity2emb = {} relation2emb", "dev_samples: for token in sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token) logger.info('Finished making tokenization results into", "= list(all_token_set) # split all_token_set to processes parts and deal with multi-processing all_token_parts", "words with length <= ignore_length') args = parser.parse_args() # load ConceptNet here logger.info(\"Begin", "{} relation2id = {} with open(entity_path, 'r', encoding=\"utf-8\") as f: for line in", "pass first line if len(ls) <= 1: continue rel = ls[0].strip() idx =", "\"\\n\") logger.info(\"For all KG, {}/{} retrieved entities used, {}/{} retrieved relations used.\".format( len(entity2emb),", "\" \".join(map(str, emb)) + \"\\n\") logger.info(\"For all KG, {}/{} retrieved entities used, {}/{}", "cur_triple: continue head, rel, tail = triple[0], triple[1], triple[2] if cur_head == head", "limit: break if token in head.split(\"_\"): core_entitys.add(head) if token in tail.split(\"_\"): core_entitys.add(tail) #", "for sample in train_samples + dev_samples: for token in sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token)", "100 f.write(rel + \" \" + \" \".join(map(str, emb)) + \"\\n\") logger.info(\"For all", "trim 
the \"/c/en/\" and just get the entity name, convert all to -", "knowledge here if len(triples) > limit: break if token in head.split(\"_\"): core_entitys.add(head) if", "get_concept_mapping(args.entity_path, args.relation_path) # load pickled samples logger.info('Begin to load tokenization results...') train_samples =", "emb in entity2emb.items(): assert len(emb) == 100 if entity == \"\" or entity", "Split(\"/\")[-1] to trim the \"/c/en/\" and just get the entity name, convert all", "= len(entity2id) # if rel not in relation2id.keys(): # relation2id[rel] = len(relation2id) #", "= ls[0].strip() idx = int(ls[1].strip()) relation2id[rel] = idx return entity2id, relation2id def search_triples(token,", "for i, part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,)) p.close() p.join()", "triple in graph_triples: head, rel, tail = triple[0], triple[1], triple[2] if head not", "args,)) p.close() p.join() logger.info(\"all processes done!\") # combine all results logger.info('Finished retrieving token", "p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,)) p.close() p.join() logger.info(\"all processes done!\") # combine", "triple[2] # remove empty entity triple if head == \"\" or head ==", "set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of dev set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet", "and triples\"\"\" # entity2id = {} # relation2id = {} # for triple", "args.entity_path, args.relation_path) # logger.info(\"Finished mapping of relations and entities.\") # get concept mapping", "logger.info('{} / {} tokens retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'),", "ls[2].strip() 
triple = (head, rel, tail) conceptnet_triples.append(triple) return conceptnet_triples # def build_mapping(triples, entity_path,", "\"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity = search_triples(token, conceptnet_triples) nodes, edges, edges_attr, token_triples = build_graph(contained_triples) return", "# entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path) # logger.info(\"Finished mapping of", "# for entity, idx in entity2id.items(): # f_e.write(entity + \" \" + str(idx))", "logging.INFO) logger = logging.getLogger(__name__) PROCESSES = 60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples = []", "help='token file of dev set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path') parser.add_argument('--entity_path', type=str,", "\": logger.info(\"empty entity: {}\".format(entity)) f.write(entity + \" \" + \" \".join(map(str, emb)) +", "< min_len: min_len = len(entity) min_entity = entity core_entity = min_entity else: core_entity", "f: for line in f.readlines(): ls = line.split('\\t') if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\"", "in tail.split(\"_\"): triples.append(triple) # limit retrieved knowledge here if len(triples) > limit: break", "and just get the entity name, convert all to - Lowercase for uniformity.", "in sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token) logger.info('Finished making tokenization results into token set.') #", "triple[1], triple[2] # remove empty entity triple if head == \"\" or head", "# stopword_cnt = 0 # punctuation_cnt = 0 all_token_set = list(all_token_set) # split", "for uniformity. 
\"\"\" rel = ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower() if", "= line.split(\" \") # pass first line if len(ls) <= 1: continue entity", "parts and deal with multi-processing all_token_parts = [] part_token_nums = int(len(all_token_set) / PROCESSES)", "set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords list.') # mk directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) #", "in relation2emb.items(): assert len(emb) == 100 f.write(rel + \" \" + \" \".join(map(str,", "in conceptnet_triples: if triple == cur_triple: continue head, rel, tail = triple[0], triple[1],", "in range(PROCESSES): if i != PROCESSES - 1: cur_token_set = all_token_set[i * part_token_nums:", "return entity2id, relation2id def search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = [] core_entitys =", "entity_emb, relation_emb) with open(args.entity2emb_path, 'w', encoding='utf-8') as f: for entity, emb in entity2emb.items():", "id2entity = {v:k for k,v in entity2id.items()} # id2relation = {v:k for k,v", "1: continue rel = ls[0].strip() idx = int(ls[1].strip()) relation2id[rel] = idx return entity2id,", "= int(ls[1].strip()) relation2id[rel] = idx return entity2id, relation2id def search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\"", "search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = [] cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1], cur_triple[2]", "token2datas = pickle.load(f_in) logger.info(\"save retrieved entity and relation embeddings...\") with open(args.entity_emb_path, 'rb') as", "\" + \" \".join(map(str, emb)) + \"\\n\") with open(args.relation2emb_path, 'w', encoding=\"utf-8\") as f:", "path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') 
parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords') parser.add_argument('--ignore_length', type=int,", "tail = triple[0], triple[1], triple[2] # if head not in entity2id.keys(): # entity2id[head]", "== cur_triple: continue head, rel, tail = triple[0], triple[1], triple[2] if cur_head ==", "# punctuation_cnt += 1 continue if args.no_stopwords and token in stopwords: logger.info('{} is", "logger.info('Finished making tokenization results into token set.') # load stopwords stopwords = set(nltk.corpus.stopwords.words('english'))", "as fin: token2data = pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all results done!\") logger.info('{} / {}", "sample in train_samples + dev_samples: for token in sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token) logger.info('Finished", "ls[0].strip() rel = ls[1].strip() tail = ls[2].strip() triple = (head, rel, tail) conceptnet_triples.append(triple)", "\"\"\" rel = ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\",", "retrieved token graphs {}'.format(index)) def del_pos(s): \"\"\" Deletes part-of-speech encoding from an entity", "entity2id, relation2id def search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = [] core_entitys = set()", "utf-8 -*- ''' @File : retrieve_1hop.py @Author : yyhaker @Contact : <EMAIL> @Time", "\" + \" \".join(map(str, emb)) + \"\\n\") logger.info(\"For all KG, {}/{} retrieved entities", "cur_triple[0], cur_triple[1], cur_triple[2] for triple in conceptnet_triples: if triple == cur_triple: continue head,", "into token set.') # load stopwords stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords list.')", "entity2id = {} # relation2id = {} # for triple in triples: #", "f: for line in f.readlines(): ls = line.split(\" \") # pass first line", 
"= ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha():", "[] cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1], cur_triple[2] for triple in conceptnet_triples: if", "+= 1 continue if args.no_stopwords and token in stopwords: logger.info('{} is stopword, skipped!'.format(token))", "f_r: # for relation, idx in relation2id.items(): # f_r.write(relation + \" \" +", "in head.split(\"_\"): core_entitys.add(head) if token in tail.split(\"_\"): core_entitys.add(tail) # define core entity, choose", "triple[1], triple[2] if token in head.split(\"_\") or token in tail.split(\"_\"): triples.append(triple) # limit", "logger.info(\"empty entity: {}\".format(entity)) f.write(entity + \" \" + \" \".join(map(str, emb)) + \"\\n\")", "entity2id.items(): # f_e.write(entity + \" \" + str(idx)) # f_e.write('\\n') # with open(relation_path,", "<= 1: continue rel = ls[0].strip() idx = int(ls[1].strip()) relation2id[rel] = idx return", "retrieved token graphs.') # with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in: # token2datas =", "{} for token in tqdm(token_part): if token in set(string.punctuation): logger.info('{} is punctuation, skipped!'.format(token))", "in head.split(\"_\") or token in tail.split(\"_\"): triples.append(triple) # limit retrieved knowledge here if", "= [] cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1], cur_triple[2] for triple in conceptnet_triples:", "fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs.') # with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb')", "part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,)) p.close() p.join() logger.info(\"all processes", "head, rel, tail = triple[0], triple[1], triple[2] # remove empty 
entity triple if", "> 0 and len(token) <= args.ignore_length: logger.info('{} is too short, skipped!'.format(token)) continue #", "token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"] = core_entity token2datas[token] = token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb')", "remove empty entity triple if head == \"\" or head == \" \":", "= set() for sample in train_samples + dev_samples: for token in sample['query_tokens'] +", "sub-graph...') # token2graph = dict() # stopword_cnt = 0 # punctuation_cnt = 0", "conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity = search_triples(token, conceptnet_triples) nodes, edges, edges_attr, token_triples = build_graph(contained_triples)", "s.endswith(\"/r\"): s = s[:-2] return s def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb): \"\"\"retrieve", "= triple[0], triple[1], triple[2] # remove empty entity triple if head == \"\"", "'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\" \") #", "entity2id.keys(): # entity2id[head] = len(entity2id) # if tail not in entity2id.keys(): # entity2id[tail]", "len(emb) == 100 f.write(rel + \" \" + \" \".join(map(str, emb)) + \"\\n\")", "min_entity = core_entitys[0] for entity in core_entitys: if len(entity) < min_len: min_len =", "parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop',", "tokenization results...') train_samples = pickle.load(open(args.train_token, 'rb')) dev_samples = pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading tokenization", "help=\"entity emb path\") 
parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb", "entity2emb.items(): assert len(emb) == 100 if entity == \"\" or entity == \"", "all_token_set[i * part_token_nums: (i+1) * part_token_nums] else: cur_token_set = all_token_set[i * part_token_nums: ]", "\" \" + str(idx)) # f_r.write('\\n') # id2entity = {v:k for k,v in", "# pass first line if len(ls) <= 1: continue entity = ls[0].strip() idx", "to load tokenization results...') train_samples = pickle.load(open(args.train_token, 'rb')) dev_samples = pickle.load(open(args.eval_token, 'rb')) logger.info('Finished", "concept mapping logger.info(\"get concept mapping...\") entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path) # load pickled", "not in entity2id.keys(): # entity2id[head] = len(entity2id) # if tail not in entity2id.keys():", "- %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger =", "assert len(emb) == 100 f.write(rel + \" \" + \" \".join(map(str, emb)) +", "if head not in entity2emb: entity2emb[head] = entity_emb[entity2id[head]] if rel not in relation2emb:", "neighbor triples and build sub-graph...') # token2graph = dict() # stopword_cnt = 0", "token_triples def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity = search_triples(token, conceptnet_triples) nodes, edges, edges_attr,", "encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\",\") head = ls[0].strip()", "entities used, {}/{} retrieved relations used.\".format( len(entity2emb), len(entity_emb), len(relation2emb), len(relation_emb))) if __name__ ==", "edges, edges_attr, token_triples, core_entity def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args): 
\"\"\"retrieve tokens graph\"\"\"", "args): \"\"\"retrieve tokens graph\"\"\" logger.info(\"begin run function {} at process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas", "triple[2] if cur_head == head or cur_head == tail or cur_tail == head", "token_triples, core_entity = build_graph_for_token(token, conceptnet_triples) token2data = {} token2data[\"sub_graph\"] = (nodes, edges, edges_attr)", "<EMAIL> @Time : 2020/04/07 16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2.", "triple = (head, rel, tail) conceptnet_triples.append(triple) return conceptnet_triples # def build_mapping(triples, entity_path, relation_path):", "= (head, rel, tail) en_triples.append(triple) return en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = []", "head = ls[0].strip() rel = ls[1].strip() tail = ls[2].strip() triple = (head, rel,", "and relation embeddings\"\"\" entity2emb = {} relation2emb = {} for token, data in", "for i in range(5): triple = random.choice(conceptnet_triples) logger.info(triple) # # build mappings of", "{} for token, data in token2datas.items(): graph_triples = data[\"graph_triples\"] for triple in graph_triples:", "= retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb) with open(args.entity2emb_path, 'w', encoding='utf-8') as f: for", "= 60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as", "Entity string. :return: Entity string with part-of-speech encoding removed. 
\"\"\" if s.endswith(\"/n\") or", "fout) logger.info('Finished dumping retrieved token graphs {}'.format(index)) def del_pos(s): \"\"\" Deletes part-of-speech encoding", "open(entity_path, 'w') as f_e: # for entity, idx in entity2id.items(): # f_e.write(entity +", "if len(triples) > limit: break if token in head.split(\"_\"): core_entitys.add(head) if token in", "here nodes, edges, edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples) token2data = {} token2data[\"sub_graph\"]", "entity core_entity = min_entity else: core_entity = None return triples, core_entity def search_triple_neighbor(cur_triple,", "tail = triple[0], triple[1], triple[2] if head not in entity2emb: entity2emb[head] = entity_emb[entity2id[head]]", "os.makedirs(args.output_dir) # retrive neighbor triples and build sub-graph logger.info('Begin to retrieve neighbor triples", "int(ls[1].strip()) entity2id[entity] = idx with open(relation_path, 'r', encoding=\"utf-8\") as f: for line in", "edges_attr) token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"] = core_entity token2datas[token] = token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)),", "load stopwords stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords list.') # mk directory if", "\" \".join(map(str, emb)) + \"\\n\") with open(args.relation2emb_path, 'w', encoding=\"utf-8\") as f: for rel,", "+ \" \" + \" \".join(map(str, emb)) + \"\\n\") logger.info(\"For all KG, {}/{}", "token_triples = build_graph(contained_triples) return nodes, edges, edges_attr, token_triples, core_entity def retrieve_tokens_graph(index, token_part, conceptnet_triples,", "break if token in head.split(\"_\"): core_entitys.add(head) if token in tail.split(\"_\"): core_entitys.add(tail) # define", "idx with open(relation_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls =", "tokens graph\"\"\" logger.info(\"begin 
run function {} at process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas = {}", "encoding='utf-8') as f: for entity, emb in entity2emb.items(): assert len(emb) == 100 if", "= pickle.load(f2) entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb) with open(args.entity2emb_path, 'w',", "\") # pass first line if len(ls) <= 1: continue entity = ls[0].strip()", "load tokenization results...') train_samples = pickle.load(open(args.train_token, 'rb')) dev_samples = pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading", "\".join(map(str, emb)) + \"\\n\") with open(args.relation2emb_path, 'w', encoding=\"utf-8\") as f: for rel, emb", "= [] edges = [] edges_attr = [] token_triples = [] for triple", "token set.') # load stopwords stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords list.') #", "argparse.ArgumentParser() parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of train set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token", "rel = ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\", \"\").replace(\"-\",", "None return triples, core_entity def search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = [] cur_head, cur_rel,", ": [num_edges, num_edge_features] nodes = [] edges = [] edges_attr = [] token_triples", "retrieved entities used, {}/{} retrieved relations used.\".format( len(entity2emb), len(entity_emb), len(relation2emb), len(relation_emb))) if __name__", "- Remove part-of-speech encoding. 
- Split(\"/\")[-1] to trim the \"/c/en/\" and just get", "combine all results logger.info('Finished retrieving token graphs, combine all result...') token2datas = {}", "encoding. - Split(\"/\")[-1] to trim the \"/c/en/\" and just get the entity name,", "rel = ls[0].strip() idx = int(ls[1].strip()) relation2id[rel] = idx return entity2id, relation2id def", "build graph for token here nodes, edges, edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples)", "token, data in token2datas.items(): graph_triples = data[\"graph_triples\"] for triple in graph_triples: head, rel,", "# load ConceptNet here logger.info(\"Begin loading concept triples...\") conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished loading", "0 and len(token) <= args.ignore_length: logger.info('{} is too short, skipped!'.format(token)) continue # build", "len(core_entitys) != 0: min_len = len(core_entitys[0]) min_entity = core_entitys[0] for entity in core_entitys:", "fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs {}'.format(index)) def del_pos(s): \"\"\" Deletes", "= {v:k for k,v in relation2id.items()} # return entity2id, id2entity, relation2id, id2relation def", "for k,v in entity2id.items()} # id2relation = {v:k for k,v in relation2id.items()} #", "default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords')", "en_triples.append(triple) return en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\")", "too short, skipped!'.format(token)) continue # build graph for token here nodes, edges, edges_attr,", "encoding=\"utf-8\") as f: for rel, 
emb in relation2emb.items(): assert len(emb) == 100 f.write(rel", "triple path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path',", "with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\",\")", "set.') # load stopwords stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords list.') # mk", "{}/{} retrieved entities used, {}/{} retrieved relations used.\".format( len(entity2emb), len(entity_emb), len(relation2emb), len(relation_emb))) if", "head not in nodes: nodes.append(head) if tail not in nodes: nodes.append(tail) # add", "len(edges) == len(edges_attr) return nodes, edges, edges_attr, token_triples def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples,", "for i in range(PROCESSES): with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin: token2data = pickle.load(fin)", "import random import pickle import argparse import os import nltk import logging import", "logger.info('Begin to retrieve neighbor triples and build sub-graph...') # token2graph = dict() #", "not in entity2emb: entity2emb[head] = entity_emb[entity2id[head]] if rel not in relation2emb: relation2emb[rel] =", "0 all_token_set = list(all_token_set) # split all_token_set to processes parts and deal with", "\"/c/en/\" and just get the entity name, convert all to - Lowercase for", "ls = line.split('\\t') if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\" Some preprocessing: - Remove part-of-speech", "graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout: 
pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved", "min_len: min_len = len(entity) min_entity = entity core_entity = min_entity else: core_entity =", "\"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples 3. core_entity \"\"\" import", "english triples.') logger.info(\"sample five triples...\") for i in range(5): triple = random.choice(conceptnet_triples) logger.info(triple)", "if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"): s = s[:-2] return s", "entity, idx in entity2id.items(): # f_e.write(entity + \" \" + str(idx)) # f_e.write('\\n')", "continue if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue triple = (head, rel, tail) en_triples.append(triple)", "{}\".format(retrieve_tokens_graph, os.getpid())) token2datas = {} for token in tqdm(token_part): if token in set(string.punctuation):", "emb in relation2emb.items(): assert len(emb) == 100 f.write(rel + \" \" + \"", "edges = [] edges_attr = [] token_triples = [] for triple in triples:", "encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split('\\t') if ls[2].startswith('/c/en/') and", "open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\",\") head", "in triples: # head, rel, tail = triple[0], triple[1], triple[2] # if head", "= 0 # punctuation_cnt = 0 all_token_set = list(all_token_set) # split all_token_set to", "triple[0], triple[1], triple[2] # remove empty entity triple if head == \"\" or", "nodes, edges, edges_attr, token_triples = build_graph(contained_triples) return nodes, edges, edges_attr, token_triples, core_entity def", "five triples...\") for i in range(5): triple = random.choice(conceptnet_triples) logger.info(triple) # # build", "with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs", 
"for rel, emb in relation2emb.items(): assert len(emb) == 100 f.write(rel + \" \"", "if len(core_entitys) != 0: min_len = len(core_entitys[0]) min_entity = core_entitys[0] for entity in", "# limit retrieved knowledge here if len(triples) > limit: break if token in", "as f: for rel, emb in relation2emb.items(): assert len(emb) == 100 f.write(rel +", "= list(core_entitys) if len(core_entitys) != 0: min_len = len(core_entitys[0]) min_entity = core_entitys[0] for", "triples and build sub-graph...') # token2graph = dict() # stopword_cnt = 0 #", "results logger.info('Finished retrieving token graphs, combine all result...') token2datas = {} for i", "part_token_nums: ] all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin to deal with {} processes...\".format(PROCESSES)) p =", "tail) en_triples.append(triple) return en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = [] with open(conceptnet_path, 'r',", "\"\" or entity == \" \": logger.info(\"empty entity: {}\".format(entity)) f.write(entity + \" \"", "entities.\") # get concept mapping logger.info(\"get concept mapping...\") entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path)", "{}\".format(entity)) f.write(entity + \" \" + \" \".join(map(str, emb)) + \"\\n\") with open(args.relation2emb_path,", "logger.info(\"save retrieved entity and relation embeddings...\") with open(args.entity_emb_path, 'rb') as f1: entity_emb =", "'rb') as fin: token2data = pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all results done!\") logger.info('{} /", "in stopwords: logger.info('{} is stopword, skipped!'.format(token)) # stopword_cnt += 1 continue if args.ignore_length", "at process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas = {} for token in tqdm(token_part): if token", "ConceptNet) # entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, 
args.relation_path) # logger.info(\"Finished mapping", "\"\" or tail == \" \": continue # add nodes if head not", "graphs.') # with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in: # token2datas = pickle.load(f_in) logger.info(\"save", "preprocessing: - Remove part-of-speech encoding. - Split(\"/\")[-1] to trim the \"/c/en/\" and just", "= {v:k for k,v in entity2id.items()} # id2relation = {v:k for k,v in", "punctuation_cnt = 0 all_token_set = list(all_token_set) # split all_token_set to processes parts and", "data in token2datas.items(): graph_triples = data[\"graph_triples\"] for triple in graph_triples: head, rel, tail", "part_token_nums] else: cur_token_set = all_token_set[i * part_token_nums: ] all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin to", "in relation2id.keys(): # relation2id[rel] = len(relation2id) # with open(entity_path, 'w') as f_e: #", "len(edges_attr) return nodes, edges, edges_attr, token_triples def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity =", ":return: Entity string with part-of-speech encoding removed. 
\"\"\" if s.endswith(\"/n\") or s.endswith(\"/a\") or", "process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas = {} for token in tqdm(token_part): if token in", "1: continue entity = ls[0].strip() idx = int(ls[1].strip()) entity2id[entity] = idx with open(relation_path,", "token in head.split(\"_\") or token in tail.split(\"_\"): triples.append(triple) # limit retrieved knowledge here", "in entity2emb: entity2emb[head] = entity_emb[entity2id[head]] if rel not in relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]]", "as f_r: # for relation, idx in relation2id.items(): # f_r.write(relation + \" \"", "line in f.readlines(): ls = line.split(\" \") # pass first line if len(ls)", "part_token_nums = int(len(all_token_set) / PROCESSES) for i in range(PROCESSES): if i != PROCESSES", "f.write(rel + \" \" + \" \".join(map(str, emb)) + \"\\n\") logger.info(\"For all KG,", "nodes: nodes.append(tail) # add edge edges.append([head, tail]) edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple) assert", "relations(all ConceptNet) # entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path) # logger.info(\"Finished", "line if len(ls) <= 1: continue entity = ls[0].strip() idx = int(ls[1].strip()) entity2id[entity]", "type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt',", "relation2id, entity_emb, relation_emb): \"\"\"retrieve entity and relation embeddings\"\"\" entity2emb = {} relation2emb =", "== \" \": continue # add nodes if head not in nodes: nodes.append(head)", "skipped!'.format(token)) # stopword_cnt += 1 continue if args.ignore_length 
> 0 and len(token) <=", "type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\") parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl',", "= 0 all_token_set = list(all_token_set) # split all_token_set to processes parts and deal", "conceptnet_triples: if triple == cur_triple: continue head, rel, tail = triple[0], triple[1], triple[2]", "relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]] if tail not in entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]] return", "entity and relation embeddings\"\"\" entity2emb = {} relation2emb = {} for token, data", "token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token", "main(): parser = argparse.ArgumentParser() parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of train set') parser.add_argument('--eval_token',", "file of train set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of dev set') parser.add_argument('--conceptnet_path',", "with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split('\\t')", "triple in triples: # head, rel, tail = triple[0], triple[1], triple[2] # if", "-*- encoding: utf-8 -*- ''' @File : retrieve_1hop.py @Author : yyhaker @Contact :", "Entity string with part-of-speech encoding removed. 
\"\"\" if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\")", "s[:-2] return s def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb): \"\"\"retrieve entity and relation", "triples: # head, rel, tail = triple[0], triple[1], triple[2] # if head not", "relation2id def search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = [] core_entitys = set() #", "punctuation_cnt += 1 continue if args.no_stopwords and token in stopwords: logger.info('{} is stopword,", "pickle.load(f1) with open(args.relation_emb_path, 'rb') as f2: relation_emb = pickle.load(f2) entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas,", "all_token_set[i * part_token_nums: ] all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin to deal with {} processes...\".format(PROCESSES))", "'retrived_token_graphs_1hop.data'), 'rb') as f_in: # token2datas = pickle.load(f_in) logger.info(\"save retrieved entity and relation", "\" \": continue if tail == \"\" or tail == \" \": continue", "with open(entity_path, 'w') as f_e: # for entity, idx in entity2id.items(): # f_e.write(entity", "i in range(5): triple = random.choice(conceptnet_triples) logger.info(triple) # # build mappings of entities", "for triple in triples: # head, rel, tail = triple[0], triple[1], triple[2] #", "= idx return entity2id, relation2id def search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = []", "entity in core_entitys: if len(entity) < min_len: min_len = len(entity) min_entity = entity", "result...') token2datas = {} for i in range(PROCESSES): with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as", "head]) edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple) assert len(edges) == len(edges_attr) return nodes, edges, edges_attr, token_triples", "all_token_set.add(token) logger.info('Finished making tokenization results into 
token set.') # load stopwords stopwords =", "1 continue if args.no_stopwords and token in stopwords: logger.info('{} is stopword, skipped!'.format(token)) #", "if cur_head == head or cur_head == tail or cur_tail == head or", "with {} processes...\".format(PROCESSES)) p = Pool(PROCESSES) for i, part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i,", "# -*- encoding: utf-8 -*- ''' @File : retrieve_1hop.py @Author : yyhaker @Contact", "type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output", "mapping...\") entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path) # load pickled samples logger.info('Begin to load", "for token, data in token2datas.items(): graph_triples = data[\"graph_triples\"] for triple in graph_triples: head,", "if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue triple = (head, rel, tail) en_triples.append(triple) return", "PROCESSES - 1: cur_token_set = all_token_set[i * part_token_nums: (i+1) * part_token_nums] else: cur_token_set", "as f_in: # token2datas = pickle.load(f_in) logger.info(\"save retrieved entity and relation embeddings...\") with", "ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\" Some preprocessing: - Remove part-of-speech encoding. 
- Split(\"/\")[-1] to", "# remove empty entity triple if head == \"\" or head == \"", "pickle.load(open(args.train_token, 'rb')) dev_samples = pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading tokenization results.') # build token", "if token in head.split(\"_\"): core_entitys.add(head) if token in tail.split(\"_\"): core_entitys.add(tail) # define core", "relation embeddings\"\"\" entity2emb = {} relation2emb = {} for token, data in token2datas.items():", "in tail.split(\"_\"): core_entitys.add(tail) # define core entity, choose the shortest core_entitys = list(core_entitys)", "Pool logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y", "processes...\".format(PROCESSES)) p = Pool(PROCESSES) for i, part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples,", "conceptnet_triples) nodes, edges, edges_attr, token_triples = build_graph(contained_triples) return nodes, edges, edges_attr, token_triples, core_entity", "for line in f.readlines(): ls = line.split(\",\") head = ls[0].strip() rel = ls[1].strip()", "= (nodes, edges, edges_attr) token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"] = core_entity token2datas[token] = token2data", "sys.path.append(\".\") import random import pickle import argparse import os import nltk import logging", "embeddings\"\"\" entity2emb = {} relation2emb = {} for token, data in token2datas.items(): graph_triples", "nodes, edges, edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples) token2data = {} token2data[\"sub_graph\"] =", "in triples: head, rel, tail = triple[0], triple[1], triple[2] # remove empty entity", "entity = ls[0].strip() idx = int(ls[1].strip()) entity2id[entity] = idx with open(relation_path, 'r', encoding=\"utf-8\")", "logger.info(\"combine all results done!\") logger.info('{} / {} tokens retrieved at lease 1 graph.'.format(len(token2datas),", 
"\" \" + \" \".join(map(str, emb)) + \"\\n\") with open(args.relation2emb_path, 'w', encoding=\"utf-8\") as", "help='output directory') parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords') parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with", "default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords') parser.add_argument('--ignore_length', type=int, default=0, help='ignore words", "= pickle.load(open(args.train_token, 'rb')) dev_samples = pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading tokenization results.') # build", "making tokenization results into token set.') # load stopwords stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished", "# get concept mapping logger.info(\"get concept mapping...\") entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path) #", "(1) 检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples 3. 
core_entity \"\"\" import sys sys.path.append(\".\") import random", "relation2id, id2relation def get_concept_mapping(entity_path, relation_path): \"\"\"read entity and relation mapping file\"\"\" entity2id =", "rel, tail = triple[0], triple[1], triple[2] # remove empty entity triple if head", "open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs {}'.format(index))", "s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"): s = s[:-2] return s def", "logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S',", "logger.info(triple) # # build mappings of entities and relations(all ConceptNet) # entity2id, id2entity,", "relations and entities.\") # get concept mapping logger.info(\"get concept mapping...\") entity2id, relation2id =", "if rel not in relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]] if tail not in entity2emb:", "to trim the \"/c/en/\" and just get the entity name, convert all to", "entities and triples\"\"\" # entity2id = {} # relation2id = {} # for", "- Split(\"/\")[-1] to trim the \"/c/en/\" and just get the entity name, convert", "def main(): parser = argparse.ArgumentParser() parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of train set')", "stopwords') parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with length <= ignore_length') args = parser.parse_args()", "build_mapping(triples, entity_path, relation_path): # \"\"\"build mapping of entities and triples\"\"\" # entity2id =", "tail or cur_tail == head or cur_tail == tail: neighbor_triples.append(triple) return neighbor_triples def", "continue rel = ls[0].strip() idx = int(ls[1].strip()) relation2id[rel] = idx return entity2id, relation2id", 
"string with part-of-speech encoding removed. \"\"\" if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or", "conceptnet_triples: head, rel, tail = triple[0], triple[1], triple[2] if token in head.split(\"_\") or", "'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs {}'.format(index)) def del_pos(s):", "return neighbor_triples def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x : [num_nodes, num_node_features] # edge", "import string from tqdm import tqdm from nltk.corpus import wordnet as wn from", "\" \": continue # add nodes if head not in nodes: nodes.append(head) if", "conceptnet_triples # def build_mapping(triples, entity_path, relation_path): # \"\"\"build mapping of entities and triples\"\"\"", "core_entitys = set() # search triples for triple in conceptnet_triples: head, rel, tail", "line.split(\" \") # pass first line if len(ls) <= 1: continue entity =", "level = logging.INFO) logger = logging.getLogger(__name__) PROCESSES = 60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples", "else: core_entity = None return triples, core_entity def search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples =", "- %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) PROCESSES", "path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\") parser.add_argument('--relation_emb_path',", "== \"\" or entity == \" \": logger.info(\"empty entity: {}\".format(entity)) f.write(entity + \"", "entity_emb[entity2id[head]] if rel not in relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]] if tail not in", "entity2emb, relation2emb = 
retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb) with open(args.entity2emb_path, 'w', encoding='utf-8') as", "cur_head == head or cur_head == tail or cur_tail == head or cur_tail", "with open(args.relation_emb_path, 'rb') as f2: relation_emb = pickle.load(f2) entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id,", "from tqdm import tqdm from nltk.corpus import wordnet as wn from multiprocessing import", "parser = argparse.ArgumentParser() parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of train set') parser.add_argument('--eval_token', type=str,", "core_entitys.add(tail) # define core entity, choose the shortest core_entitys = list(core_entitys) if len(core_entitys)", "head, rel, tail = triple[0], triple[1], triple[2] # if head not in entity2id.keys():", "idx = int(ls[1].strip()) relation2id[rel] = idx return entity2id, relation2id def search_triples(token, conceptnet_triples, limit=20):", "%(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) PROCESSES =", "open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs.') #", "triples and build sub-graph logger.info('Begin to retrieve neighbor triples and build sub-graph...') #", "dev set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\")", "edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple) assert len(edges) == len(edges_attr) return nodes, edges, edges_attr,", "combine all result...') token2datas = {} for i in range(PROCESSES): 
with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)),", "train set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of dev set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt',", "graphs, combine all result...') token2datas = {} for i in range(PROCESSES): with open(os.path.join(args.output_dir,", "# for triple in triples: # head, rel, tail = triple[0], triple[1], triple[2]", "f.readlines(): ls = line.split('\\t') if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\" Some preprocessing: - Remove", "= data[\"graph_triples\"] for triple in graph_triples: head, rel, tail = triple[0], triple[1], triple[2]", "# relation2id = {} # for triple in triples: # head, rel, tail", "{} tokens retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as", "2. sub-graph triples 3. 
core_entity \"\"\" import sys sys.path.append(\".\") import random import pickle", "f1: entity_emb = pickle.load(f1) with open(args.relation_emb_path, 'rb') as f2: relation_emb = pickle.load(f2) entity2emb,", "\"\").replace(\"-\", \"\").isalpha(): continue triple = (head, rel, tail) en_triples.append(triple) return en_triples def extract_triples(conceptnet_path):", "s def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb): \"\"\"retrieve entity and relation embeddings\"\"\" entity2emb", "= random.choice(conceptnet_triples) logger.info(triple) # # build mappings of entities and relations(all ConceptNet) #", "list(core_entitys) if len(core_entitys) != 0: min_len = len(core_entitys[0]) min_entity = core_entitys[0] for entity", "== len(edges_attr) return nodes, edges, edges_attr, token_triples def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity", "short, skipped!'.format(token)) continue # build graph for token here nodes, edges, edges_attr, token_triples,", "the shortest core_entitys = list(core_entitys) if len(core_entitys) != 0: min_len = len(core_entitys[0]) min_entity", "= build_graph_for_token(token, conceptnet_triples) token2data = {} token2data[\"sub_graph\"] = (nodes, edges, edges_attr) token2data[\"graph_triples\"] =", "= {} # for triple in triples: # head, rel, tail = triple[0],", "or head == \" \": continue if tail == \"\" or tail ==", "ignore_length') args = parser.parse_args() # load ConceptNet here logger.info(\"Begin loading concept triples...\") conceptnet_triples", "pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs.') # with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as", "in entity2id.items(): # f_e.write(entity + \" \" + str(idx)) # f_e.write('\\n') # with", "= {} for token, data in token2datas.items(): graph_triples = data[\"graph_triples\"] for triple in", "logger.info(\"Begin loading 
concept triples...\") conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished loading concept english triples.') logger.info(\"sample", "tail = triple[0], triple[1], triple[2] if token in head.split(\"_\") or token in tail.split(\"_\"):", "relation2id, entity_emb, relation_emb) with open(args.entity2emb_path, 'w', encoding='utf-8') as f: for entity, emb in", "range(PROCESSES): with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin: token2data = pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all", "deal with multi-processing all_token_parts = [] part_token_nums = int(len(all_token_set) / PROCESSES) for i", "length <= ignore_length') args = parser.parse_args() # load ConceptNet here logger.info(\"Begin loading concept", "# edge : [2, num_edges] # edge_attr : [num_edges, num_edge_features] nodes = []", "# f_e.write('\\n') # with open(relation_path, 'w') as f_r: # for relation, idx in", "[num_edges, num_edge_features] nodes = [] edges = [] edges_attr = [] token_triples =", "Some preprocessing: - Remove part-of-speech encoding. - Split(\"/\")[-1] to trim the \"/c/en/\" and", "Remove part-of-speech encoding. 
- Split(\"/\")[-1] to trim the \"/c/en/\" and just get the", "in relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]] if tail not in entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]]", "and entities.\") # get concept mapping logger.info(\"get concept mapping...\") entity2id, relation2id = get_concept_mapping(args.entity_path,", "parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str,", "# build token set all_token_set = set() for sample in train_samples + dev_samples:", "and relation embeddings...\") with open(args.entity_emb_path, 'rb') as f1: entity_emb = pickle.load(f1) with open(args.relation_emb_path,", "part-of-speech encoding removed. \"\"\" if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"): s", "cur_token_set = all_token_set[i * part_token_nums: ] all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin to deal with", "and deal with multi-processing all_token_parts = [] part_token_nums = int(len(all_token_set) / PROCESSES) for", "an entity string, if present. :param s: Entity string. 
:return: Entity string with", "nltk.corpus import wordnet as wn from multiprocessing import Pool logging.basicConfig(format = '%(asctime)s -", "not in entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]] return entity2emb, relation2emb def main(): parser =", "''' @File : retrieve_1hop.py @Author : yyhaker @Contact : <EMAIL> @Time : 2020/04/07", "retrieved knowledge here if len(triples) > limit: break if token in head.split(\"_\"): core_entitys.add(head)", "for i in range(PROCESSES): if i != PROCESSES - 1: cur_token_set = all_token_set[i", "tail = ls[2].strip() triple = (head, rel, tail) conceptnet_triples.append(triple) return conceptnet_triples # def", "relation embeddings...\") with open(args.entity_emb_path, 'rb') as f1: entity_emb = pickle.load(f1) with open(args.relation_emb_path, 'rb')", "[num_nodes, num_node_features] # edge : [2, num_edges] # edge_attr : [num_edges, num_edge_features] nodes", "python # -*- encoding: utf-8 -*- ''' @File : retrieve_1hop.py @Author : yyhaker", "#!/usr/bin/env python # -*- encoding: utf-8 -*- ''' @File : retrieve_1hop.py @Author :", "entity2emb: entity2emb[head] = entity_emb[entity2id[head]] if rel not in relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]] if", "entity and relation embeddings...\") with open(args.entity_emb_path, 'rb') as f1: entity_emb = pickle.load(f1) with", "in tqdm(token_part): if token in set(string.punctuation): logger.info('{} is punctuation, skipped!'.format(token)) # punctuation_cnt +=", "+ \" \".join(map(str, emb)) + \"\\n\") with open(args.relation2emb_path, 'w', encoding=\"utf-8\") as f: for", "if entity == \"\" or entity == \" \": logger.info(\"empty entity: {}\".format(entity)) f.write(entity", "= {} token2data[\"sub_graph\"] = (nodes, edges, edges_attr) token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"] = core_entity", "for token in sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token) logger.info('Finished making tokenization 
results into token", "tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue triple = (head, rel, tail) en_triples.append(triple) return en_triples def", "in entity2id.items()} # id2relation = {v:k for k,v in relation2id.items()} # return entity2id,", "emb path\") parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\")", "= ls[1].strip() tail = ls[2].strip() triple = (head, rel, tail) conceptnet_triples.append(triple) return conceptnet_triples", "in entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]] return entity2emb, relation2emb def main(): parser = argparse.ArgumentParser()", "split all_token_set to processes parts and deal with multi-processing all_token_parts = [] part_token_nums", "triple == cur_triple: continue head, rel, tail = triple[0], triple[1], triple[2] if cur_head", "extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line", "# f_r.write('\\n') # id2entity = {v:k for k,v in entity2id.items()} # id2relation =", "f_e.write(entity + \" \" + str(idx)) # f_e.write('\\n') # with open(relation_path, 'w') as", "+ str(idx)) # f_e.write('\\n') # with open(relation_path, 'w') as f_r: # for relation,", "\"\"\" Deletes part-of-speech encoding from an entity string, if present. 
:param s: Entity", "dumping retrieved token graphs.') # with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in: # token2datas", "= token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved", "conceptnet_triples, stopwords, args,)) p.close() p.join() logger.info(\"all processes done!\") # combine all results logger.info('Finished", "relation2id[rel] = idx return entity2id, relation2id def search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples =", "'w') as f_r: # for relation, idx in relation2id.items(): # f_r.write(relation + \"", "tail: neighbor_triples.append(triple) return neighbor_triples def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x : [num_nodes, num_node_features]", "= set() # search triples for triple in conceptnet_triples: head, rel, tail =", "retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args): \"\"\"retrieve tokens graph\"\"\" logger.info(\"begin run function {} at", "not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive neighbor triples and build sub-graph logger.info('Begin to retrieve", "in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,)) p.close() p.join() logger.info(\"all processes done!\")", "edge edges.append([head, tail]) edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple) assert len(edges) == len(edges_attr) return", "= ls[2].strip() triple = (head, rel, tail) conceptnet_triples.append(triple) return conceptnet_triples # def build_mapping(triples,", "assert len(emb) == 100 if entity == \"\" or entity == \" \":", "and relations(all ConceptNet) # entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path) #", "build 
token set all_token_set = set() for sample in train_samples + dev_samples: for", "triple[0], triple[1], triple[2] if token in head.split(\"_\") or token in tail.split(\"_\"): triples.append(triple) #", "= int(len(all_token_set) / PROCESSES) for i in range(PROCESSES): if i != PROCESSES -", "if len(entity) < min_len: min_len = len(entity) min_entity = entity core_entity = min_entity", "skipped!'.format(token)) # punctuation_cnt += 1 continue if args.no_stopwords and token in stopwords: logger.info('{}", "# f_e.write(entity + \" \" + str(idx)) # f_e.write('\\n') # with open(relation_path, 'w')", "args.relation_path) # logger.info(\"Finished mapping of relations and entities.\") # get concept mapping logger.info(\"get", "entity: {}\".format(entity)) f.write(entity + \" \" + \" \".join(map(str, emb)) + \"\\n\") with", "len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token", "\"\"\"retrieve tokens graph\"\"\" logger.info(\"begin run function {} at process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas =", "dict() # stopword_cnt = 0 # punctuation_cnt = 0 all_token_set = list(all_token_set) #", "rel, tail) en_triples.append(triple) return en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = [] with open(conceptnet_path,", "head, rel, tail = triple[0], triple[1], triple[2] if head not in entity2emb: entity2emb[head]", "def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for", "token in tqdm(token_part): if token in set(string.punctuation): logger.info('{} is punctuation, skipped!'.format(token)) # punctuation_cnt", "core_entity def search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = [] cur_head, cur_rel, cur_tail = cur_triple[0],", 
"build_graph(contained_triples) return nodes, edges, edges_attr, token_triples, core_entity def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args):", "mapping of relations and entities.\") # get concept mapping logger.info(\"get concept mapping...\") entity2id,", "nodes, edges, edges_attr, token_triples def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity = search_triples(token, conceptnet_triples)", "mk directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive neighbor triples and build sub-graph", "/ {} tokens retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb')", "if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive neighbor triples and build sub-graph logger.info('Begin to", "# if tail not in entity2id.keys(): # entity2id[tail] = len(entity2id) # if rel", "\": continue if tail == \"\" or tail == \" \": continue #", "entity == \" \": logger.info(\"empty entity: {}\".format(entity)) f.write(entity + \" \" + \"", "\"\"\"retrieve entity and relation embeddings\"\"\" entity2emb = {} relation2emb = {} for token,", "in token2datas.items(): graph_triples = data[\"graph_triples\"] for triple in graph_triples: head, rel, tail =", "relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path) # logger.info(\"Finished mapping of relations and entities.\")", "'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\",\") head =", "@Time : 2020/04/07 16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2. 
sub-graph", "empty entity triple if head == \"\" or head == \" \": continue", "default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb", "concept mapping...\") entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path) # load pickled samples logger.info('Begin to", "build sub-graph...') # token2graph = dict() # stopword_cnt = 0 # punctuation_cnt =", "rel, emb in relation2emb.items(): assert len(emb) == 100 f.write(rel + \" \" +", "token in tail.split(\"_\"): triples.append(triple) # limit retrieved knowledge here if len(triples) > limit:", "logger.info('{} is too short, skipped!'.format(token)) continue # build graph for token here nodes,", "encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\" \") # pass", "shortest core_entitys = list(core_entitys) if len(core_entitys) != 0: min_len = len(core_entitys[0]) min_entity =", "for triple in conceptnet_triples: if triple == cur_triple: continue head, rel, tail =", "# entity2id[tail] = len(entity2id) # if rel not in relation2id.keys(): # relation2id[rel] =", "tail = triple[0], triple[1], triple[2] # remove empty entity triple if head ==", "entity triple if head == \"\" or head == \" \": continue if", "def get_concept_mapping(entity_path, relation_path): \"\"\"read entity and relation mapping file\"\"\" entity2id = {} relation2id", "def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x : [num_nodes, num_node_features] # edge : [2,", "{} token2data[\"sub_graph\"] = (nodes, edges, edges_attr) token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"] = core_entity token2datas[token]", "f.readlines(): ls = line.split(\" \") # pass first line if len(ls) <= 
1:", "with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin: token2data = pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all results", "for token here nodes, edges, edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples) token2data =", "mappings of entities and relations(all ConceptNet) # entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples,", "nodes: nodes.append(head) if tail not in nodes: nodes.append(tail) # add edge edges.append([head, tail])", "= len(entity) min_entity = entity core_entity = min_entity else: core_entity = None return", "open(entity_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\" \")", "\"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in", "'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split('\\t') if ls[2].startswith('/c/en/')", "s: Entity string. :return: Entity string with part-of-speech encoding removed. 
\"\"\" if s.endswith(\"/n\")", "id2relation def get_concept_mapping(entity_path, relation_path): \"\"\"read entity and relation mapping file\"\"\" entity2id = {}", "with open(args.relation2emb_path, 'w', encoding=\"utf-8\") as f: for rel, emb in relation2emb.items(): assert len(emb)", "load ConceptNet here logger.info(\"Begin loading concept triples...\") conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished loading concept", "graph for token here nodes, edges, edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples) token2data", "f_in: # token2datas = pickle.load(f_in) logger.info(\"save retrieved entity and relation embeddings...\") with open(args.entity_emb_path,", "= [] token_triples = [] for triple in triples: head, rel, tail =", "# # build mappings of entities and relations(all ConceptNet) # entity2id, id2entity, relation2id,", "triples.') logger.info(\"sample five triples...\") for i in range(5): triple = random.choice(conceptnet_triples) logger.info(triple) #", "head not in entity2id.keys(): # entity2id[head] = len(entity2id) # if tail not in", "load pickled samples logger.info('Begin to load tokenization results...') train_samples = pickle.load(open(args.train_token, 'rb')) dev_samples", "logger.info('Finished retrieving token graphs, combine all result...') token2datas = {} for i in", "# pass first line if len(ls) <= 1: continue rel = ls[0].strip() idx", "[] for triple in triples: head, rel, tail = triple[0], triple[1], triple[2] #", "= pickle.load(f1) with open(args.relation_emb_path, 'rb') as f2: relation_emb = pickle.load(f2) entity2emb, relation2emb =", "emb path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir',", "= 
core_entity token2datas[token] = token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout: pickle.dump(token2datas, fout)", "\" + str(idx)) # f_e.write('\\n') # with open(relation_path, 'w') as f_r: # for", "return entity2id, id2entity, relation2id, id2relation def get_concept_mapping(entity_path, relation_path): \"\"\"read entity and relation mapping", "import argparse import os import nltk import logging import string from tqdm import", "{} # relation2id = {} # for triple in triples: # head, rel,", "run function {} at process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas = {} for token in", "logger.info(\"get concept mapping...\") entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path) # load pickled samples logger.info('Begin", "not in nodes: nodes.append(tail) # add edge edges.append([head, tail]) edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel)", "Lowercase for uniformity. \"\"\" rel = ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower()", "logger.info('Begin to load tokenization results...') train_samples = pickle.load(open(args.train_token, 'rb')) dev_samples = pickle.load(open(args.eval_token, 'rb'))", "triples, core_entity def search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = [] cur_head, cur_rel, cur_tail =", "2020/04/07 16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples 3.", "\"\"\" Some preprocessing: - Remove part-of-speech encoding. 
- Split(\"/\")[-1] to trim the \"/c/en/\"", "relation_emb[relation2id[rel]] if tail not in entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]] return entity2emb, relation2emb def", "graphs {}'.format(index)) def del_pos(s): \"\"\" Deletes part-of-speech encoding from an entity string, if", "token2datas.items(): graph_triples = data[\"graph_triples\"] for triple in graph_triples: head, rel, tail = triple[0],", "wordnet as wn from multiprocessing import Pool logging.basicConfig(format = '%(asctime)s - %(levelname)s -", "# head, rel, tail = triple[0], triple[1], triple[2] # if head not in", "min_len = len(core_entitys[0]) min_entity = core_entitys[0] for entity in core_entitys: if len(entity) <", "'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs.') # with open(os.path.join(args.output_dir,", "1 continue if args.ignore_length > 0 and len(token) <= args.ignore_length: logger.info('{} is too", "to processes parts and deal with multi-processing all_token_parts = [] part_token_nums = int(len(all_token_set)", "if token in set(string.punctuation): logger.info('{} is punctuation, skipped!'.format(token)) # punctuation_cnt += 1 continue", "# define core entity, choose the shortest core_entitys = list(core_entitys) if len(core_entitys) !=", "or s.endswith(\"/v\") or s.endswith(\"/r\"): s = s[:-2] return s def retrieved_entity_rel_emb(token2datas, entity2id, relation2id,", "core_entity = min_entity else: core_entity = None return triples, core_entity def search_triple_neighbor(cur_triple, conceptnet_triples):", "of train set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of dev set') parser.add_argument('--conceptnet_path', type=str,", "1. sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples 3. 
core_entity \"\"\" import sys sys.path.append(\".\")", "graph_triples: head, rel, tail = triple[0], triple[1], triple[2] if head not in entity2emb:", "tail not in entity2id.keys(): # entity2id[tail] = len(entity2id) # if rel not in", "- %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO)", "cur_tail == head or cur_tail == tail: neighbor_triples.append(triple) return neighbor_triples def build_graph(triples): \"\"\"连接相同的实体构建子图,", "conceptnet_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines():", "return conceptnet_triples # def build_mapping(triples, entity_path, relation_path): # \"\"\"build mapping of entities and", "args=(i, part, conceptnet_triples, stopwords, args,)) p.close() p.join() logger.info(\"all processes done!\") # combine all", "line.split('\\t') if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\" Some preprocessing: - Remove part-of-speech encoding. -", "= {} # relation2id = {} # for triple in triples: # head,", "edges, edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples) token2data = {} token2data[\"sub_graph\"] = (nodes,", "== \"\" or head == \" \": continue if tail == \"\" or", "with multi-processing all_token_parts = [] part_token_nums = int(len(all_token_set) / PROCESSES) for i in", "part-of-speech encoding. 
- Split(\"/\")[-1] to trim the \"/c/en/\" and just get the entity", "token graphs.') # with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in: # token2datas = pickle.load(f_in)", "add edge edges.append([head, tail]) edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple) assert len(edges) == len(edges_attr)", "p.close() p.join() logger.info(\"all processes done!\") # combine all results logger.info('Finished retrieving token graphs,", "# \"\"\"build mapping of entities and triples\"\"\" # entity2id = {} # relation2id", "parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of dev set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple", "type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of train set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of", "= int(ls[1].strip()) entity2id[entity] = idx with open(relation_path, 'r', encoding=\"utf-8\") as f: for line", "= {} for token in tqdm(token_part): if token in set(string.punctuation): logger.info('{} is punctuation,", "retrieved entity and relation embeddings...\") with open(args.entity_emb_path, 'rb') as f1: entity_emb = pickle.load(f1)", "parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords') parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with length <=", "results into token set.') # load stopwords stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords", "= dict() # stopword_cnt = 0 # punctuation_cnt = 0 all_token_set = list(all_token_set)", "in entity2emb.items(): assert len(emb) == 100 if 
entity == \"\" or entity ==", "= [] part_token_nums = int(len(all_token_set) / PROCESSES) for i in range(PROCESSES): if i", "relation2id = {} # for triple in triples: # head, rel, tail =", "here if len(triples) > limit: break if token in head.split(\"_\"): core_entitys.add(head) if token", "tail == \" \": continue # add nodes if head not in nodes:", "loading tokenization results.') # build token set all_token_set = set() for sample in", "for line in f.readlines(): ls = line.split('\\t') if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\" Some", "= build_graph(contained_triples) return nodes, edges, edges_attr, token_triples, core_entity def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords,", "extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line", "get the entity name, convert all to - Lowercase for uniformity. \"\"\" rel", "relation mapping file\"\"\" entity2id = {} relation2id = {} with open(entity_path, 'r', encoding=\"utf-8\")", "default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\") parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation", "triple in conceptnet_triples: head, rel, tail = triple[0], triple[1], triple[2] if token in", "pickle import argparse import os import nltk import logging import string from tqdm", "search triples for triple in conceptnet_triples: head, rel, tail = triple[0], triple[1], triple[2]", "en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f:", "head == \"\" or head == \" \": continue if tail == \"\"", "cur_rel, cur_tail = cur_triple[0], 
cur_triple[1], cur_triple[2] for triple in conceptnet_triples: if triple ==", "build mappings of entities and relations(all ConceptNet) # entity2id, id2entity, relation2id, id2relation =", "stopwords stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords list.') # mk directory if not", "tqdm import tqdm from nltk.corpus import wordnet as wn from multiprocessing import Pool", "entity2emb = {} relation2emb = {} for token, data in token2datas.items(): graph_triples =", "entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path) # load pickled samples logger.info('Begin to load tokenization", "token in set(string.punctuation): logger.info('{} is punctuation, skipped!'.format(token)) # punctuation_cnt += 1 continue if", "if head not in entity2id.keys(): # entity2id[head] = len(entity2id) # if tail not", "head, rel, tail = triple[0], triple[1], triple[2] if cur_head == head or cur_head", "from multiprocessing import Pool logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',", "entity2emb, relation2emb def main(): parser = argparse.ArgumentParser() parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of", "# entity2id[head] = len(entity2id) # if tail not in entity2id.keys(): # entity2id[tail] =", "# edge_attr : [num_edges, num_edge_features] nodes = [] edges = [] edges_attr =", "logger.info('{} is punctuation, skipped!'.format(token)) # punctuation_cnt += 1 continue if args.no_stopwords and token", "type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity", "head or 
cur_head == tail or cur_tail == head or cur_tail == tail:", "edges_attr.append(rel) token_triples.append(triple) assert len(edges) == len(edges_attr) return nodes, edges, edges_attr, token_triples def build_graph_for_token(token,", "# split all_token_set to processes parts and deal with multi-processing all_token_parts = []", "en_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines():", "'rb') as f2: relation_emb = pickle.load(f2) entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb,", "range(5): triple = random.choice(conceptnet_triples) logger.info(triple) # # build mappings of entities and relations(all", "token_triples token2data[\"core_entity\"] = core_entity token2datas[token] = token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout:", ": retrieve_1hop.py @Author : yyhaker @Contact : <EMAIL> @Time : 2020/04/07 16:33:58 '''", "= len(core_entitys[0]) min_entity = core_entitys[0] for entity in core_entitys: if len(entity) < min_len:", "emb)) + \"\\n\") with open(args.relation2emb_path, 'w', encoding=\"utf-8\") as f: for rel, emb in", "not in relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]] if tail not in entity2emb: entity2emb[tail] =", "head.split(\"_\"): core_entitys.add(head) if token in tail.split(\"_\"): core_entitys.add(tail) # define core entity, choose the", "f.readlines(): ls = line.split(\",\") head = ls[0].strip() rel = ls[1].strip() tail = ls[2].strip()", "core_entitys.add(head) if token in tail.split(\"_\"): core_entitys.add(tail) # define core entity, choose the shortest", "pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading tokenization results.') # build token set all_token_set = set()", "with part-of-speech encoding removed. 
\"\"\" if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"):", "relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb) with open(args.entity2emb_path, 'w', encoding='utf-8') as f:", "default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\") parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt',", "help='ignore stopwords') parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with length <= ignore_length') args =", "samples logger.info('Begin to load tokenization results...') train_samples = pickle.load(open(args.train_token, 'rb')) dev_samples = pickle.load(open(args.eval_token,", "{} # for triple in triples: # head, rel, tail = triple[0], triple[1],", "len(token) <= args.ignore_length: logger.info('{} is too short, skipped!'.format(token)) continue # build graph for", "entity2id.items()} # id2relation = {v:k for k,v in relation2id.items()} # return entity2id, id2entity,", "= ls[0].strip() idx = int(ls[1].strip()) entity2id[entity] = idx with open(relation_path, 'r', encoding=\"utf-8\") as", "of entities and relations(all ConceptNet) # entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path,", "entity == \"\" or entity == \" \": logger.info(\"empty entity: {}\".format(entity)) f.write(entity +", "entity, choose the shortest core_entitys = list(core_entitys) if len(core_entitys) != 0: min_len =", "help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\") parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', 
help=\"relation emb", "# stopword_cnt += 1 continue if args.ignore_length > 0 and len(token) <= args.ignore_length:", "KG, {}/{} retrieved entities used, {}/{} retrieved relations used.\".format( len(entity2emb), len(entity_emb), len(relation2emb), len(relation_emb)))", "idx in entity2id.items(): # f_e.write(entity + \" \" + str(idx)) # f_e.write('\\n') #", "and build sub-graph logger.info('Begin to retrieve neighbor triples and build sub-graph...') # token2graph", "neighbor triples and build sub-graph logger.info('Begin to retrieve neighbor triples and build sub-graph...')", "triple[1], triple[2] if head not in entity2emb: entity2emb[head] = entity_emb[entity2id[head]] if rel not", "1 graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping", "tail = del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue if not tail.replace(\"_\", \"\").replace(\"-\",", "import sys sys.path.append(\".\") import random import pickle import argparse import os import nltk", "dumping retrieved token graphs {}'.format(index)) def del_pos(s): \"\"\" Deletes part-of-speech encoding from an", "+= 1 continue if args.ignore_length > 0 and len(token) <= args.ignore_length: logger.info('{} is", "edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple) assert len(edges) == len(edges_attr) return nodes, edges, edges_attr, token_triples def", "and len(token) <= args.ignore_length: logger.info('{} is too short, skipped!'.format(token)) continue # build graph", "relation_path): # \"\"\"build mapping of entities and triples\"\"\" # entity2id = {} #", "core_entitys: if len(entity) < min_len: min_len = len(entity) min_entity = entity core_entity =", "logging import string from tqdm import tqdm from nltk.corpus import wordnet as wn", "(i+1) * part_token_nums] else: cur_token_set 
= all_token_set[i * part_token_nums: ] all_token_parts.append(cur_token_set) # multi-processing", "retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb): \"\"\"retrieve entity and relation embeddings\"\"\" entity2emb = {}", "core_entitys = list(core_entitys) if len(core_entitys) != 0: min_len = len(core_entitys[0]) min_entity = core_entitys[0]", "args.ignore_length: logger.info('{} is too short, skipped!'.format(token)) continue # build graph for token here", "and ls[3].startswith('/c/en/'): \"\"\" Some preprocessing: - Remove part-of-speech encoding. - Split(\"/\")[-1] to trim", "conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = [] cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1], cur_triple[2] for", "with length <= ignore_length') args = parser.parse_args() # load ConceptNet here logger.info(\"Begin loading", "the \"/c/en/\" and just get the entity name, convert all to - Lowercase", "multi-processing all_token_parts = [] part_token_nums = int(len(all_token_set) / PROCESSES) for i in range(PROCESSES):", "set() # search triples for triple in conceptnet_triples: head, rel, tail = triple[0],", "= line.split(\" \") # pass first line if len(ls) <= 1: continue rel", "multiprocessing import Pool logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt", "!= PROCESSES - 1: cur_token_set = all_token_set[i * part_token_nums: (i+1) * part_token_nums] else:", "if tail not in entity2id.keys(): # entity2id[tail] = len(entity2id) # if rel not", "{}/{} retrieved relations used.\".format( len(entity2emb), len(entity_emb), len(relation2emb), len(relation_emb))) if __name__ == '__main__': main()", "path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\") parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb 
path\")", "tokenization results.') # build token set all_token_set = set() for sample in train_samples", "/ PROCESSES) for i in range(PROCESSES): if i != PROCESSES - 1: cur_token_set", "{} with open(entity_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls =", "edge : [2, num_edges] # edge_attr : [num_edges, num_edge_features] nodes = [] edges", "= [] core_entitys = set() # search triples for triple in conceptnet_triples: head,", "random import pickle import argparse import os import nltk import logging import string", "import pickle import argparse import os import nltk import logging import string from", "token2graph = dict() # stopword_cnt = 0 # punctuation_cnt = 0 all_token_set =", "import tqdm from nltk.corpus import wordnet as wn from multiprocessing import Pool logging.basicConfig(format", "path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') parser.add_argument('--no_stopwords', action='store_true',", "default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory')", "to retrieve neighbor triples and build sub-graph...') # token2graph = dict() # stopword_cnt", "triple in triples: head, rel, tail = triple[0], triple[1], triple[2] # remove empty", "continue head, rel, tail = triple[0], triple[1], triple[2] if cur_head == head or", "from an entity string, if present. :param s: Entity string. 
:return: Entity string", "= logging.getLogger(__name__) PROCESSES = 60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples = [] with open(conceptnet_path,", "return en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as", "'rb') as f1: entity_emb = pickle.load(f1) with open(args.relation_emb_path, 'rb') as f2: relation_emb =", "action='store_true', default=True, help='ignore stopwords') parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with length <= ignore_length')", "'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin: token2data = pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all results done!\") logger.info('{}", "if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue triple", "tail) conceptnet_triples.append(triple) return conceptnet_triples # def build_mapping(triples, entity_path, relation_path): # \"\"\"build mapping of", "sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token) logger.info('Finished making tokenization results into token set.') # load", "[] edges_attr = [] token_triples = [] for triple in triples: head, rel,", "loading concept triples...\") conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished loading concept english triples.') logger.info(\"sample five", "= search_triples(token, conceptnet_triples) nodes, edges, edges_attr, token_triples = build_graph(contained_triples) return nodes, edges, edges_attr,", "{} processes...\".format(PROCESSES)) p = Pool(PROCESSES) for i, part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part,", "mapping file\"\"\" entity2id = {} relation2id = {} with open(entity_path, 'r', encoding=\"utf-8\") as", "in conceptnet_triples: head, rel, tail = triple[0], 
triple[1], triple[2] if token in head.split(\"_\")", "stopwords: logger.info('{} is stopword, skipped!'.format(token)) # stopword_cnt += 1 continue if args.ignore_length >", "num_edges] # edge_attr : [num_edges, num_edge_features] nodes = [] edges = [] edges_attr", "entity_emb, relation_emb): \"\"\"retrieve entity and relation embeddings\"\"\" entity2emb = {} relation2emb = {}", "%(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger", "rel, tail = triple[0], triple[1], triple[2] if cur_head == head or cur_head ==", "logger.info('Finished loading stopwords list.') # mk directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive", "min_len = len(entity) min_entity = entity core_entity = min_entity else: core_entity = None", "and token in stopwords: logger.info('{} is stopword, skipped!'.format(token)) # stopword_cnt += 1 continue", "# load stopwords stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords list.') # mk directory", "with open(entity_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\"", "+ \"\\n\") with open(args.relation2emb_path, 'w', encoding=\"utf-8\") as f: for rel, emb in relation2emb.items():", "= pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all results done!\") logger.info('{} / {} tokens retrieved at", "ls[1].strip() tail = ls[2].strip() triple = (head, rel, tail) conceptnet_triples.append(triple) return conceptnet_triples #", "relation_emb): \"\"\"retrieve entity and relation embeddings\"\"\" entity2emb = {} relation2emb = {} for", "if i != PROCESSES - 1: cur_token_set = all_token_set[i * part_token_nums: (i+1) *", "line if len(ls) <= 1: continue rel = ls[0].strip() idx = int(ls[1].strip()) relation2id[rel]", "parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with length <= ignore_length') args = parser.parse_args() #", "is too short, 
skipped!'.format(token)) continue # build graph for token here nodes, edges,", "build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity = search_triples(token, conceptnet_triples) nodes, edges, edges_attr, token_triples =", "relation2id = get_concept_mapping(args.entity_path, args.relation_path) # load pickled samples logger.info('Begin to load tokenization results...')", "import wordnet as wn from multiprocessing import Pool logging.basicConfig(format = '%(asctime)s - %(levelname)s", "f.write(entity + \" \" + \" \".join(map(str, emb)) + \"\\n\") with open(args.relation2emb_path, 'w',", "with open(relation_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split(\"", "in set(string.punctuation): logger.info('{} is punctuation, skipped!'.format(token)) # punctuation_cnt += 1 continue if args.no_stopwords", "edges_attr, token_triples, core_entity def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args): \"\"\"retrieve tokens graph\"\"\" logger.info(\"begin", "triple[0], triple[1], triple[2] if head not in entity2emb: entity2emb[head] = entity_emb[entity2id[head]] if rel", "ls[0].strip() idx = int(ls[1].strip()) relation2id[rel] = idx return entity2id, relation2id def search_triples(token, conceptnet_triples,", "stopword_cnt += 1 continue if args.ignore_length > 0 and len(token) <= args.ignore_length: logger.info('{}", "= pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading tokenization results.') # build token set all_token_set =", "entity2id[head] = len(entity2id) # if tail not in entity2id.keys(): # entity2id[tail] = len(entity2id)", "file of dev set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt',", "stopword_cnt = 0 # punctuation_cnt = 0 
all_token_set = list(all_token_set) # split all_token_set", "relation2emb.items(): assert len(emb) == 100 f.write(rel + \" \" + \" \".join(map(str, emb))", "core_entity \"\"\" import sys sys.path.append(\".\") import random import pickle import argparse import os", "# f_r.write(relation + \" \" + str(idx)) # f_r.write('\\n') # id2entity = {v:k", "args = parser.parse_args() # load ConceptNet here logger.info(\"Begin loading concept triples...\") conceptnet_triples =", "id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path) # logger.info(\"Finished mapping of relations and entities.\") #", "default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id", "(nodes, edges, edges_attr) token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"] = core_entity token2datas[token] = token2data with", "= all_token_set[i * part_token_nums: ] all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin to deal with {}", "not in nodes: nodes.append(head) if tail not in nodes: nodes.append(tail) # add edge", "parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords') parser.add_argument('--ignore_length', type=int, default=0,", "idx in relation2id.items(): # f_r.write(relation + \" \" + str(idx)) # f_r.write('\\n') #", "in relation2id.items(): # f_r.write(relation + \" \" + str(idx)) # f_r.write('\\n') # id2entity", "help='conceptnet triple path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', 
help=\"entity2id path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\")", "all to - Lowercase for uniformity. \"\"\" rel = ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower()", "= get_concept_mapping(args.entity_path, args.relation_path) # load pickled samples logger.info('Begin to load tokenization results...') train_samples", "dev_samples = pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading tokenization results.') # build token set all_token_set", "for entity in core_entitys: if len(entity) < min_len: min_len = len(entity) min_entity =", "pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs {}'.format(index)) def del_pos(s): \"\"\" Deletes part-of-speech", "if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\" Some preprocessing: - Remove part-of-speech encoding. - Split(\"/\")[-1]", "head == \" \": continue if tail == \"\" or tail == \"", "string from tqdm import tqdm from nltk.corpus import wordnet as wn from multiprocessing", "if triple == cur_triple: continue head, rel, tail = triple[0], triple[1], triple[2] if", "len(triples) > limit: break if token in head.split(\"_\"): core_entitys.add(head) if token in tail.split(\"_\"):", "Pool(PROCESSES) for i, part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,)) p.close()", "is punctuation, skipped!'.format(token)) # punctuation_cnt += 1 continue if args.no_stopwords and token in", "core_entity def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args): \"\"\"retrieve tokens graph\"\"\" logger.info(\"begin run function", "'rb')) logger.info('Finished loading tokenization results.') # build token set all_token_set = set() for", "# logger.info(\"Finished mapping of relations and entities.\") # get concept mapping logger.info(\"get concept", 
"parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') parser.add_argument('--no_stopwords', action='store_true', default=True,", "= parser.parse_args() # load ConceptNet here logger.info(\"Begin loading concept triples...\") conceptnet_triples = extract_triples(args.conceptnet_path)", "for triple in graph_triples: head, rel, tail = triple[0], triple[1], triple[2] if head", "+ str(idx)) # f_r.write('\\n') # id2entity = {v:k for k,v in entity2id.items()} #", "for k,v in relation2id.items()} # return entity2id, id2entity, relation2id, id2relation def get_concept_mapping(entity_path, relation_path):", "line.split(\" \") # pass first line if len(ls) <= 1: continue rel =", "path\") parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path',", "= [] for triple in triples: head, rel, tail = triple[0], triple[1], triple[2]", "# with open(entity_path, 'w') as f_e: # for entity, idx in entity2id.items(): #", "= extract_triples(args.conceptnet_path) logger.info('Finished loading concept english triples.') logger.info(\"sample five triples...\") for i in", "get_concept_mapping(entity_path, relation_path): \"\"\"read entity and relation mapping file\"\"\" entity2id = {} relation2id =", "else: cur_token_set = all_token_set[i * part_token_nums: ] all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin to deal", "= entity_emb[entity2id[tail]] return entity2emb, relation2emb def main(): parser = argparse.ArgumentParser() parser.add_argument('--train_token', type=str, 
default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data',", "set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\") parser.add_argument('--relation_path',", "+ \" \" + str(idx)) # f_e.write('\\n') # with open(relation_path, 'w') as f_r:", "all_token_set to processes parts and deal with multi-processing all_token_parts = [] part_token_nums =", "entity_emb = pickle.load(f1) with open(args.relation_emb_path, 'rb') as f2: relation_emb = pickle.load(f2) entity2emb, relation2emb", "== tail or cur_tail == head or cur_tail == tail: neighbor_triples.append(triple) return neighbor_triples", "PROCESSES = 60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\")", "# if head not in entity2id.keys(): # entity2id[head] = len(entity2id) # if tail", "@Author : yyhaker @Contact : <EMAIL> @Time : 2020/04/07 16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分:", "retrive neighbor triples and build sub-graph logger.info('Begin to retrieve neighbor triples and build", "<= 1: continue entity = ls[0].strip() idx = int(ls[1].strip()) entity2id[entity] = idx with", "# combine all results logger.info('Finished retrieving token graphs, combine all result...') token2datas =", "\"\").replace(\"-\", \"\").isalpha(): continue if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue triple = (head, rel,", "encoding from an entity string, if present. :param s: Entity string. 
:return: Entity", "\"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x : [num_nodes, num_node_features] # edge : [2, num_edges] #", "60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f:", "if tail == \"\" or tail == \" \": continue # add nodes", "@File : retrieve_1hop.py @Author : yyhaker @Contact : <EMAIL> @Time : 2020/04/07 16:33:58", "all results logger.info('Finished retrieving token graphs, combine all result...') token2datas = {} for", "tail not in nodes: nodes.append(tail) # add edge edges.append([head, tail]) edges.append([tail, head]) edges_attr.append(rel)", "as f: for entity, emb in entity2emb.items(): assert len(emb) == 100 if entity", "in range(5): triple = random.choice(conceptnet_triples) logger.info(triple) # # build mappings of entities and", "default=True, help='ignore stopwords') parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with length <= ignore_length') args", "id2entity, relation2id, id2relation def get_concept_mapping(entity_path, relation_path): \"\"\"read entity and relation mapping file\"\"\" entity2id", "1: cur_token_set = all_token_set[i * part_token_nums: (i+1) * part_token_nums] else: cur_token_set = all_token_set[i", "search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = [] core_entitys = set() # search triples", "= [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls", "loading stopwords list.') # mk directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive neighbor", "wn from multiprocessing import Pool logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -", "f: for rel, emb in relation2emb.items(): assert len(emb) == 100 f.write(rel + \"", "datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) PROCESSES = 60", "stopwords, args,)) p.close() p.join() logger.info(\"all 
processes done!\") # combine all results logger.info('Finished retrieving", "= (head, rel, tail) conceptnet_triples.append(triple) return conceptnet_triples # def build_mapping(triples, entity_path, relation_path): #", "triples...\") conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished loading concept english triples.') logger.info(\"sample five triples...\") for", "100 if entity == \"\" or entity == \" \": logger.info(\"empty entity: {}\".format(entity))", "rel, tail = triple[0], triple[1], triple[2] # if head not in entity2id.keys(): #", "edges, edges_attr, token_triples def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity = search_triples(token, conceptnet_triples) nodes,", "edges_attr, token_triples = build_graph(contained_triples) return nodes, edges, edges_attr, token_triples, core_entity def retrieve_tokens_graph(index, token_part,", "path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir', type=str,", "conceptnet_triples.append(triple) return conceptnet_triples # def build_mapping(triples, entity_path, relation_path): # \"\"\"build mapping of entities", "in f.readlines(): ls = line.split(\",\") head = ls[0].strip() rel = ls[1].strip() tail =", "entity2emb[tail] = entity_emb[entity2id[tail]] return entity2emb, relation2emb def main(): parser = argparse.ArgumentParser() parser.add_argument('--train_token', type=str,", "tokens retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout:", "= {} with open(entity_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls", "neighbor_triples 
def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x : [num_nodes, num_node_features] # edge :", "args.ignore_length > 0 and len(token) <= args.ignore_length: logger.info('{} is too short, skipped!'.format(token)) continue", "continue # build graph for token here nodes, edges, edges_attr, token_triples, core_entity =", "not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue triple =", "== head or cur_head == tail or cur_tail == head or cur_tail ==", "argparse import os import nltk import logging import string from tqdm import tqdm", "[2, num_edges] # edge_attr : [num_edges, num_edge_features] nodes = [] edges = []", "not in entity2id.keys(): # entity2id[tail] = len(entity2id) # if rel not in relation2id.keys():", "triples for triple in conceptnet_triples: head, rel, tail = triple[0], triple[1], triple[2] if", "'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs {}'.format(index)) def", "# load pickled samples logger.info('Begin to load tokenization results...') train_samples = pickle.load(open(args.train_token, 'rb'))", "conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished loading concept english triples.') logger.info(\"sample five triples...\") for i", "relation2emb = {} for token, data in token2datas.items(): graph_triples = data[\"graph_triples\"] for triple", "= min_entity else: core_entity = None return triples, core_entity def search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\"", "continue entity = ls[0].strip() idx = int(ls[1].strip()) entity2id[entity] = idx with open(relation_path, 'r',", "id2relation = {v:k for k,v in relation2id.items()} # return entity2id, id2entity, relation2id, id2relation", "token graphs {}'.format(index)) def del_pos(s): \"\"\" Deletes part-of-speech encoding from an entity string,", 
"'w', encoding=\"utf-8\") as f: for rel, emb in relation2emb.items(): assert len(emb) == 100", "or cur_tail == head or cur_tail == tail: neighbor_triples.append(triple) return neighbor_triples def build_graph(triples):", "build sub-graph logger.info('Begin to retrieve neighbor triples and build sub-graph...') # token2graph =", "# id2entity = {v:k for k,v in entity2id.items()} # id2relation = {v:k for", "# mk directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive neighbor triples and build", "''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples 3. core_entity \"\"\"", "== head or cur_tail == tail: neighbor_triples.append(triple) return neighbor_triples def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\"", "loading concept english triples.') logger.info(\"sample five triples...\") for i in range(5): triple =", ": yyhaker @Contact : <EMAIL> @Time : 2020/04/07 16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1.", "x : [num_nodes, num_node_features] # edge : [2, num_edges] # edge_attr : [num_edges,", "continue if tail == \"\" or tail == \" \": continue # add", "PROCESSES) for i in range(PROCESSES): if i != PROCESSES - 1: cur_token_set =", "not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue triple = (head, rel, tail) en_triples.append(triple) return en_triples", "= idx with open(relation_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls", "fout) logger.info('Finished dumping retrieved token graphs.') # with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in:", "# token2datas = pickle.load(f_in) logger.info(\"save retrieved entity and relation embeddings...\") with open(args.entity_emb_path, 'rb')", "i in range(PROCESSES): if i != PROCESSES - 1: cur_token_set = all_token_set[i *", "= {} for i in range(PROCESSES): with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin: 
token2data", "stopwords, args): \"\"\"retrieve tokens graph\"\"\" logger.info(\"begin run function {} at process {}\".format(retrieve_tokens_graph, os.getpid()))", "parser.parse_args() # load ConceptNet here logger.info(\"Begin loading concept triples...\") conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished", "not in relation2id.keys(): # relation2id[rel] = len(relation2id) # with open(entity_path, 'w') as f_e:", "if head == \"\" or head == \" \": continue if tail ==", "triple[1], triple[2] if cur_head == head or cur_head == tail or cur_tail ==", "first line if len(ls) <= 1: continue rel = ls[0].strip() idx = int(ls[1].strip())", "0: min_len = len(core_entitys[0]) min_entity = core_entitys[0] for entity in core_entitys: if len(entity)", "nodes.append(head) if tail not in nodes: nodes.append(tail) # add edge edges.append([head, tail]) edges.append([tail,", "continue if args.ignore_length > 0 and len(token) <= args.ignore_length: logger.info('{} is too short,", "f2: relation_emb = pickle.load(f2) entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb) with", "idx return entity2id, relation2id def search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = [] core_entitys", "first line if len(ls) <= 1: continue entity = ls[0].strip() idx = int(ls[1].strip())", "default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb", "= del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha():", "retrieve neighbor triples and build 
sub-graph...') # token2graph = dict() # stopword_cnt =", "skipped!'.format(token)) continue # build graph for token here nodes, edges, edges_attr, token_triples, core_entity", "directory') parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords') parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with length", "min_entity else: core_entity = None return triples, core_entity def search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples", "== \"\" or tail == \" \": continue # add nodes if head", "s = s[:-2] return s def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb): \"\"\"retrieve entity", "encoding: utf-8 -*- ''' @File : retrieve_1hop.py @Author : yyhaker @Contact : <EMAIL>", "entity2emb[head] = entity_emb[entity2id[head]] if rel not in relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]] if tail", "# retrive neighbor triples and build sub-graph logger.info('Begin to retrieve neighbor triples and", "or s.endswith(\"/r\"): s = s[:-2] return s def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb):", "def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for", "cur_head == tail or cur_tail == head or cur_tail == tail: neighbor_triples.append(triple) return", ": <EMAIL> @Time : 2020/04/07 16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. 
sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G", "import os import nltk import logging import string from tqdm import tqdm from", "all KG, {}/{} retrieved entities used, {}/{} retrieved relations used.\".format( len(entity2emb), len(entity_emb), len(relation2emb),", "token_triples = [] for triple in triples: head, rel, tail = triple[0], triple[1],", "retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb) with open(args.entity2emb_path, 'w', encoding='utf-8') as f: for entity,", "返回子图G\"\"\" # x : [num_nodes, num_node_features] # edge : [2, num_edges] # edge_attr", "if head not in nodes: nodes.append(head) if tail not in nodes: nodes.append(tail) #", "for token in tqdm(token_part): if token in set(string.punctuation): logger.info('{} is punctuation, skipped!'.format(token)) #", "token_part, conceptnet_triples, stopwords, args): \"\"\"retrieve tokens graph\"\"\" logger.info(\"begin run function {} at process", "conceptnet_triples, stopwords, args): \"\"\"retrieve tokens graph\"\"\" logger.info(\"begin run function {} at process {}\".format(retrieve_tokens_graph,", "# id2relation = {v:k for k,v in relation2id.items()} # return entity2id, id2entity, relation2id,", "if len(ls) <= 1: continue entity = ls[0].strip() idx = int(ls[1].strip()) entity2id[entity] =", "len(core_entitys[0]) min_entity = core_entitys[0] for entity in core_entitys: if len(entity) < min_len: min_len", "min_entity = entity core_entity = min_entity else: core_entity = None return triples, core_entity", "== \" \": logger.info(\"empty entity: {}\".format(entity)) f.write(entity + \" \" + \" \".join(map(str,", "to deal with {} processes...\".format(PROCESSES)) p = Pool(PROCESSES) for i, part in enumerate(all_token_parts):", "part_token_nums: (i+1) * part_token_nums] else: cur_token_set = all_token_set[i * part_token_nums: ] all_token_parts.append(cur_token_set) #", "if rel not in relation2id.keys(): # relation2id[rel] = len(relation2id) # with open(entity_path, 'w')", "limit 
retrieved knowledge here if len(triples) > limit: break if token in head.split(\"_\"):", "name, convert all to - Lowercase for uniformity. \"\"\" rel = ls[1].split(\"/\")[-1].lower() head", "in entity2id.keys(): # entity2id[tail] = len(entity2id) # if rel not in relation2id.keys(): #", "list(all_token_set) # split all_token_set to processes parts and deal with multi-processing all_token_parts =", "entity2id, relation2id, entity_emb, relation_emb) with open(args.entity2emb_path, 'w', encoding='utf-8') as f: for entity, emb", "from nltk.corpus import wordnet as wn from multiprocessing import Pool logging.basicConfig(format = '%(asctime)s", "file\"\"\" entity2id = {} relation2id = {} with open(entity_path, 'r', encoding=\"utf-8\") as f:", "rel not in relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]] if tail not in entity2emb: entity2emb[tail]", "results...') train_samples = pickle.load(open(args.train_token, 'rb')) dev_samples = pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading tokenization results.')", "choose the shortest core_entitys = list(core_entitys) if len(core_entitys) != 0: min_len = len(core_entitys[0])", "type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\") parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help=\"relation emb path\") parser.add_argument('--entity2emb_path', type=str,", "logger.info(\"all processes done!\") # combine all results logger.info('Finished retrieving token graphs, combine all", "\"\").isalpha(): continue if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue triple = (head, rel, tail)", "as f: for line in f.readlines(): ls = line.split('\\t') if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'):", "if args.ignore_length > 0 and len(token) <= args.ignore_length: logger.info('{} is too short, skipped!'.format(token))", "core_entity = None return triples, 
core_entity def search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = []", "+ dev_samples: for token in sample['query_tokens'] + sample['document_tokens']: all_token_set.add(token) logger.info('Finished making tokenization results", "%H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) PROCESSES = 60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\"", "or tail == \" \": continue # add nodes if head not in", "set() for sample in train_samples + dev_samples: for token in sample['query_tokens'] + sample['document_tokens']:", "import Pool logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt =", "logger.info('Finished loading tokenization results.') # build token set all_token_set = set() for sample", "* part_token_nums: (i+1) * part_token_nums] else: cur_token_set = all_token_set[i * part_token_nums: ] all_token_parts.append(cur_token_set)", "token2data[\"sub_graph\"] = (nodes, edges, edges_attr) token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"] = core_entity token2datas[token] =", "entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path) # logger.info(\"Finished mapping of relations", "all_token_parts = [] part_token_nums = int(len(all_token_set) / PROCESSES) for i in range(PROCESSES): if", "3. 
core_entity \"\"\" import sys sys.path.append(\".\") import random import pickle import argparse import", "nodes if head not in nodes: nodes.append(head) if tail not in nodes: nodes.append(tail)", "= ls[0].strip() rel = ls[1].strip() tail = ls[2].strip() triple = (head, rel, tail)", "pickle.load(f2) entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb) with open(args.entity2emb_path, 'w', encoding='utf-8')", "with open(args.entity_emb_path, 'rb') as f1: entity_emb = pickle.load(f1) with open(args.relation_emb_path, 'rb') as f2:", "rel not in relation2id.keys(): # relation2id[rel] = len(relation2id) # with open(entity_path, 'w') as", "= triple[0], triple[1], triple[2] if cur_head == head or cur_head == tail or", "\"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = [] core_entitys = set() # search triples for triple in", "ls = line.split(\" \") # pass first line if len(ls) <= 1: continue", "token2datas = {} for i in range(PROCESSES): with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin:", "for entity, idx in entity2id.items(): # f_e.write(entity + \" \" + str(idx)) #", "core entity, choose the shortest core_entitys = list(core_entitys) if len(core_entitys) != 0: min_len", "\".join(map(str, emb)) + \"\\n\") logger.info(\"For all KG, {}/{} retrieved entities used, {}/{} retrieved", "edges, edges_attr) token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"] = core_entity token2datas[token] = token2data with open(os.path.join(args.output_dir,", "num_node_features] # edge : [2, num_edges] # edge_attr : [num_edges, num_edge_features] nodes =", "= del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue if not", "retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 
'wb') as fout: pickle.dump(token2datas,", "Deletes part-of-speech encoding from an entity string, if present. :param s: Entity string.", "= {} relation2id = {} with open(entity_path, 'r', encoding=\"utf-8\") as f: for line", "sub-graph logger.info('Begin to retrieve neighbor triples and build sub-graph...') # token2graph = dict()", "token_triples, core_entity def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args): \"\"\"retrieve tokens graph\"\"\" logger.info(\"begin run", "as f_e: # for entity, idx in entity2id.items(): # f_e.write(entity + \" \"", "and build sub-graph...') # token2graph = dict() # stopword_cnt = 0 # punctuation_cnt", "help='relation2emb path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords') parser.add_argument('--ignore_length',", "'w') as f_e: # for entity, idx in entity2id.items(): # f_e.write(entity + \"", "triple[2] # if head not in entity2id.keys(): # entity2id[head] = len(entity2id) # if", "(head, rel, tail) conceptnet_triples.append(triple) return conceptnet_triples # def build_mapping(triples, entity_path, relation_path): # \"\"\"build", "with open(relation_path, 'w') as f_r: # for relation, idx in relation2id.items(): # f_r.write(relation", "line.split(\",\") head = ls[0].strip() rel = ls[1].strip() tail = ls[2].strip() triple = (head,", "= build_mapping(conceptnet_triples, args.entity_path, args.relation_path) # logger.info(\"Finished mapping of relations and entities.\") # get", "in relation2id.items()} # return entity2id, id2entity, relation2id, id2relation def get_concept_mapping(entity_path, relation_path): \"\"\"read entity", "type=int, default=0, help='ignore words with length <= ignore_length') args = parser.parse_args() # load", "\"\"\"read entity and relation mapping file\"\"\" entity2id = {} relation2id = {} with", "+ 
\"\\n\") logger.info(\"For all KG, {}/{} retrieved entities used, {}/{} retrieved relations used.\".format(", "head.split(\"_\") or token in tail.split(\"_\"): triples.append(triple) # limit retrieved knowledge here if len(triples)", "relation_emb = pickle.load(f2) entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb) with open(args.entity2emb_path,", "# relation2id[rel] = len(relation2id) # with open(entity_path, 'w') as f_e: # for entity,", "processes done!\") # combine all results logger.info('Finished retrieving token graphs, combine all result...')", "\" \" + str(idx)) # f_e.write('\\n') # with open(relation_path, 'w') as f_r: #", "all results done!\") logger.info('{} / {} tokens retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set)))", "as f1: entity_emb = pickle.load(f1) with open(args.relation_emb_path, 'rb') as f2: relation_emb = pickle.load(f2)", "with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in: # token2datas = pickle.load(f_in) logger.info(\"save retrieved entity", "= line.split(\",\") head = ls[0].strip() rel = ls[1].strip() tail = ls[2].strip() triple =", "build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x : [num_nodes, num_node_features] # edge : [2, num_edges]", "sys sys.path.append(\".\") import random import pickle import argparse import os import nltk import", "as wn from multiprocessing import Pool logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s", "conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = [] core_entitys = set() # search triples for", "relation_path): \"\"\"read entity and relation mapping file\"\"\" entity2id = {} relation2id = {}", "{}'.format(index)) def del_pos(s): \"\"\" Deletes part-of-speech encoding from an entity string, if present.", "or token in tail.split(\"_\"): triples.append(triple) # limit retrieved knowledge here if len(triples) >", "entity, emb in 
entity2emb.items(): assert len(emb) == 100 if entity == \"\" or", "len(entity) min_entity = entity core_entity = min_entity else: core_entity = None return triples,", "part, conceptnet_triples, stopwords, args,)) p.close() p.join() logger.info(\"all processes done!\") # combine all results", "# search triples for triple in conceptnet_triples: head, rel, tail = triple[0], triple[1],", ": [num_nodes, num_node_features] # edge : [2, num_edges] # edge_attr : [num_edges, num_edge_features]", "nodes = [] edges = [] edges_attr = [] token_triples = [] for", "id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path) # logger.info(\"Finished mapping of relations and", "as f2: relation_emb = pickle.load(f2) entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb)", ": [2, num_edges] # edge_attr : [num_edges, num_edge_features] nodes = [] edges =", "rel, tail) conceptnet_triples.append(triple) return conceptnet_triples # def build_mapping(triples, entity_path, relation_path): # \"\"\"build mapping", "cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1], cur_triple[2] for triple in conceptnet_triples: if triple", "triples...\") for i in range(5): triple = random.choice(conceptnet_triples) logger.info(triple) # # build mappings", "sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples 3. core_entity \"\"\" import sys sys.path.append(\".\") import", "= '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) PROCESSES = 60 def", "def del_pos(s): \"\"\" Deletes part-of-speech encoding from an entity string, if present. 
:param", "if tail not in entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]] return entity2emb, relation2emb def main():", "def search_triple_neighbor(cur_triple, conceptnet_triples): \"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = [] cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1],", "import nltk import logging import string from tqdm import tqdm from nltk.corpus import", "[] token_triples = [] for triple in triples: head, rel, tail = triple[0],", "core_entity = search_triples(token, conceptnet_triples) nodes, edges, edges_attr, token_triples = build_graph(contained_triples) return nodes, edges,", "triples: head, rel, tail = triple[0], triple[1], triple[2] # remove empty entity triple", "cur_triple[2] for triple in conceptnet_triples: if triple == cur_triple: continue head, rel, tail", "all_token_set = list(all_token_set) # split all_token_set to processes parts and deal with multi-processing", "continue triple = (head, rel, tail) en_triples.append(triple) return en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples", "# def build_mapping(triples, entity_path, relation_path): # \"\"\"build mapping of entities and triples\"\"\" #", "f: for line in f.readlines(): ls = line.split(\",\") head = ls[0].strip() rel =", "and relation mapping file\"\"\" entity2id = {} relation2id = {} with open(entity_path, 'r',", "ConceptNet here logger.info(\"Begin loading concept triples...\") conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished loading concept english", "as f: for line in f.readlines(): ls = line.split(\",\") head = ls[0].strip() rel", "# punctuation_cnt = 0 all_token_set = list(all_token_set) # split all_token_set to processes parts", "neighbor_triples.append(triple) return neighbor_triples def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x : [num_nodes, num_node_features] #", "in f.readlines(): ls = line.split(\" \") # pass first line if len(ls) <=", "# add nodes if head 
not in nodes: nodes.append(head) if tail not in", "relation2emb def main(): parser = argparse.ArgumentParser() parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of train", "os import nltk import logging import string from tqdm import tqdm from nltk.corpus", "del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue if not tail.replace(\"_\",", "= [] edges_attr = [] token_triples = [] for triple in triples: head,", "{v:k for k,v in relation2id.items()} # return entity2id, id2entity, relation2id, id2relation def get_concept_mapping(entity_path,", "conceptnet_triples) token2data = {} token2data[\"sub_graph\"] = (nodes, edges, edges_attr) token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"]", "int(ls[1].strip()) relation2id[rel] = idx return entity2id, relation2id def search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples", "graph_triples = data[\"graph_triples\"] for triple in graph_triples: head, rel, tail = triple[0], triple[1],", "done!\") logger.info('{} / {} tokens retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir,", "type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt',", "int(len(all_token_set) / PROCESSES) for i in range(PROCESSES): if i != PROCESSES - 1:", "head or cur_tail == tail: neighbor_triples.append(triple) return neighbor_triples def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" #", "for triple in triples: head, rel, tail = triple[0], 
triple[1], triple[2] # remove", "return nodes, edges, edges_attr, token_triples def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity = search_triples(token,", "[] core_entitys = set() # search triples for triple in conceptnet_triples: head, rel,", "def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args): \"\"\"retrieve tokens graph\"\"\" logger.info(\"begin run function {}", "logger.info(\"sample five triples...\") for i in range(5): triple = random.choice(conceptnet_triples) logger.info(triple) # #", "triples = [] core_entitys = set() # search triples for triple in conceptnet_triples:", "entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]] return entity2emb, relation2emb def main(): parser = argparse.ArgumentParser() parser.add_argument('--train_token',", "@Contact : <EMAIL> @Time : 2020/04/07 16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph (1)", "k,v in entity2id.items()} # id2relation = {v:k for k,v in relation2id.items()} # return", "continue if args.no_stopwords and token in stopwords: logger.info('{} is stopword, skipped!'.format(token)) # stopword_cnt", "neighbor_triples = [] cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1], cur_triple[2] for triple in", "p.join() logger.info(\"all processes done!\") # combine all results logger.info('Finished retrieving token graphs, combine", "set all_token_set = set() for sample in train_samples + dev_samples: for token in", "or cur_head == tail or cur_tail == head or cur_tail == tail: neighbor_triples.append(triple)", "+ \" \" + \" \".join(map(str, emb)) + \"\\n\") with open(args.relation2emb_path, 'w', encoding=\"utf-8\")", "len(entity2id) # if rel not in relation2id.keys(): # relation2id[rel] = len(relation2id) # with", "[] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls =", "return triples, core_entity def search_triple_neighbor(cur_triple, conceptnet_triples): 
\"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = [] cur_head, cur_rel, cur_tail", "parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl',", "ls = line.split(\",\") head = ls[0].strip() rel = ls[1].strip() tail = ls[2].strip() triple", "extract_triples(args.conceptnet_path) logger.info('Finished loading concept english triples.') logger.info(\"sample five triples...\") for i in range(5):", "stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords list.') # mk directory if not os.path.exists(args.output_dir):", "edges_attr, token_triples def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity = search_triples(token, conceptnet_triples) nodes, edges,", "of dev set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path') parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help=\"entity2id", "token2data = pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all results done!\") logger.info('{} / {} tokens retrieved", "= argparse.ArgumentParser() parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of train set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data',", "results.') # build token set all_token_set = set() for sample in train_samples +", "stopword, skipped!'.format(token)) # stopword_cnt += 1 continue if 
args.ignore_length > 0 and len(token)", "# with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in: # token2datas = pickle.load(f_in) logger.info(\"save retrieved", "core_entitys[0] for entity in core_entitys: if len(entity) < min_len: min_len = len(entity) min_entity", "len(entity) < min_len: min_len = len(entity) min_entity = entity core_entity = min_entity else:", "cur_tail = cur_triple[0], cur_triple[1], cur_triple[2] for triple in conceptnet_triples: if triple == cur_triple:", "= triple[0], triple[1], triple[2] if head not in entity2emb: entity2emb[head] = entity_emb[entity2id[head]] if", "default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token file of train set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of dev", "if len(ls) <= 1: continue rel = ls[0].strip() idx = int(ls[1].strip()) relation2id[rel] =", "for entity, emb in entity2emb.items(): assert len(emb) == 100 if entity == \"\"", "lease 1 graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished", "f: for entity, emb in entity2emb.items(): assert len(emb) == 100 if entity ==", "\"\"\"检索出所有英文的三元组\"\"\" en_triples = [] with open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in", "del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue", "token2datas = {} for token in tqdm(token_part): if token in set(string.punctuation): logger.info('{} is", "= len(relation2id) # with open(entity_path, 'w') as f_e: # for entity, idx in", "punctuation, skipped!'.format(token)) # punctuation_cnt += 1 continue if args.no_stopwords and token in stopwords:", "# token2graph = 
dict() # stopword_cnt = 0 # punctuation_cnt = 0 all_token_set", "'w', encoding='utf-8') as f: for entity, emb in entity2emb.items(): assert len(emb) == 100", "token2data[\"core_entity\"] = core_entity token2datas[token] = token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout: pickle.dump(token2datas,", "concept triples...\") conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished loading concept english triples.') logger.info(\"sample five triples...\")", "entity2id.keys(): # entity2id[tail] = len(entity2id) # if rel not in relation2id.keys(): # relation2id[rel]", "= pickle.load(f_in) logger.info(\"save retrieved entity and relation embeddings...\") with open(args.entity_emb_path, 'rb') as f1:", "open(args.relation_emb_path, 'rb') as f2: relation_emb = pickle.load(f2) entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id,", "help=\"entity2id path\") parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help=\"relation2id path\") parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help=\"entity emb path\")", "* part_token_nums: ] all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin to deal with {} processes...\".format(PROCESSES)) p", "define core entity, choose the shortest core_entitys = list(core_entitys) if len(core_entitys) != 0:", "entity and relation mapping file\"\"\" entity2id = {} relation2id = {} with open(entity_path,", "continue # add nodes if head not in nodes: nodes.append(head) if tail not", "= '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level", "build_mapping(conceptnet_triples, args.entity_path, args.relation_path) # logger.info(\"Finished mapping of relations and entities.\") # get concept", "= logging.INFO) logger = 
logging.getLogger(__name__) PROCESSES = 60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples =", "rel = ls[1].strip() tail = ls[2].strip() triple = (head, rel, tail) conceptnet_triples.append(triple) return", "检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples 3. core_entity \"\"\" import sys", "nltk import logging import string from tqdm import tqdm from nltk.corpus import wordnet", "{} relation2emb = {} for token, data in token2datas.items(): graph_triples = data[\"graph_triples\"] for", "ls[0].strip() idx = int(ls[1].strip()) entity2id[entity] = idx with open(relation_path, 'r', encoding=\"utf-8\") as f:", "# x : [num_nodes, num_node_features] # edge : [2, num_edges] # edge_attr :", "entities and relations(all ConceptNet) # entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path)", "function {} at process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas = {} for token in tqdm(token_part):", "= entity core_entity = min_entity else: core_entity = None return triples, core_entity def", "cur_tail == tail: neighbor_triples.append(triple) return neighbor_triples def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x :", "logger = logging.getLogger(__name__) PROCESSES = 60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples = [] with", "= len(entity2id) # if tail not in entity2id.keys(): # entity2id[tail] = len(entity2id) #", "relation2id.keys(): # relation2id[rel] = len(relation2id) # with open(entity_path, 'w') as f_e: # for", "multi-processing logger.info(\"Begin to deal with {} processes...\".format(PROCESSES)) p = Pool(PROCESSES) for i, part", "= Pool(PROCESSES) for i, part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,))", "tail]) edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel) 
token_triples.append(triple) assert len(edges) == len(edges_attr) return nodes, edges,", "relation2id[rel] = len(relation2id) # with open(entity_path, 'w') as f_e: # for entity, idx", "entity name, convert all to - Lowercase for uniformity. \"\"\" rel = ls[1].split(\"/\")[-1].lower()", "head not in entity2emb: entity2emb[head] = entity_emb[entity2id[head]] if rel not in relation2emb: relation2emb[rel]", "\"\"\" if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"): s = s[:-2] return", "i != PROCESSES - 1: cur_token_set = all_token_set[i * part_token_nums: (i+1) * part_token_nums]", "relation_emb) with open(args.entity2emb_path, 'w', encoding='utf-8') as f: for entity, emb in entity2emb.items(): assert", "\"\\n\") with open(args.relation2emb_path, 'w', encoding=\"utf-8\") as f: for rel, emb in relation2emb.items(): assert", "if token in head.split(\"_\") or token in tail.split(\"_\"): triples.append(triple) # limit retrieved knowledge", "in f.readlines(): ls = line.split('\\t') if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\" Some preprocessing: -", ":param s: Entity string. :return: Entity string with part-of-speech encoding removed. 
\"\"\" if", "# entity2id = {} # relation2id = {} # for triple in triples:", "logger.info('Finished loading concept english triples.') logger.info(\"sample five triples...\") for i in range(5): triple", "logger.info('Finished dumping retrieved token graphs.') # with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in: #", "nodes, edges, edges_attr, token_triples, core_entity def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args): \"\"\"retrieve tokens", "here logger.info(\"Begin loading concept triples...\") conceptnet_triples = extract_triples(args.conceptnet_path) logger.info('Finished loading concept english triples.')", "> limit: break if token in head.split(\"_\"): core_entitys.add(head) if token in tail.split(\"_\"): core_entitys.add(tail)", "triple in conceptnet_triples: if triple == cur_triple: continue head, rel, tail = triple[0],", "检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples 3. core_entity \"\"\" import sys sys.path.append(\".\") import random import", "tokenization results into token set.') # load stopwords stopwords = set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading", "for relation, idx in relation2id.items(): # f_r.write(relation + \" \" + str(idx)) #", "'%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level =", "i in range(PROCESSES): with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin: token2data = pickle.load(fin) token2datas.update(token2data)", "entity_path, relation_path): # \"\"\"build mapping of entities and triples\"\"\" # entity2id = {}", "pass first line if len(ls) <= 1: continue entity = ls[0].strip() idx =", "entity2id, id2entity, relation2id, id2relation def get_concept_mapping(entity_path, relation_path): \"\"\"read entity and relation mapping file\"\"\"", "search_triples(token, conceptnet_triples) nodes, edges, edges_attr, token_triples = build_graph(contained_triples) 
return nodes, edges, edges_attr, token_triples,", "\") # pass first line if len(ls) <= 1: continue rel = ls[0].strip()", "- 1: cur_token_set = all_token_set[i * part_token_nums: (i+1) * part_token_nums] else: cur_token_set =", "== 100 if entity == \"\" or entity == \" \": logger.info(\"empty entity:", "def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\" contained_triples, core_entity = search_triples(token, conceptnet_triples) nodes, edges, edges_attr, token_triples", "\"\"\" import sys sys.path.append(\".\") import random import pickle import argparse import os import", "the entity name, convert all to - Lowercase for uniformity. \"\"\" rel =", "triple[0], triple[1], triple[2] # if head not in entity2id.keys(): # entity2id[head] = len(entity2id)", "help=\"relation emb path\") parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path')", "build_graph_for_token(token, conceptnet_triples) token2data = {} token2data[\"sub_graph\"] = (nodes, edges, edges_attr) token2data[\"graph_triples\"] = token_triples", "open(args.entity_emb_path, 'rb') as f1: entity_emb = pickle.load(f1) with open(args.relation_emb_path, 'rb') as f2: relation_emb", "as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs.') # with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'),", "for triple in conceptnet_triples: head, rel, tail = triple[0], triple[1], triple[2] if token", "def build_mapping(triples, entity_path, relation_path): # \"\"\"build mapping of entities and triples\"\"\" # entity2id", "add nodes if head not in nodes: nodes.append(head) if tail not in nodes:", "entity_emb[entity2id[tail]] return entity2emb, relation2emb def main(): parser = argparse.ArgumentParser() 
parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data', help='token", "retrieve_1hop.py @Author : yyhaker @Contact : <EMAIL> @Time : 2020/04/07 16:33:58 ''' \"\"\"", "tail.split(\"_\"): triples.append(triple) # limit retrieved knowledge here if len(triples) > limit: break if", "= triple[0], triple[1], triple[2] if token in head.split(\"_\") or token in tail.split(\"_\"): triples.append(triple)", "triple if head == \"\" or head == \" \": continue if tail", "idx = int(ls[1].strip()) entity2id[entity] = idx with open(relation_path, 'r', encoding=\"utf-8\") as f: for", "string. :return: Entity string with part-of-speech encoding removed. \"\"\" if s.endswith(\"/n\") or s.endswith(\"/a\")", "relation, idx in relation2id.items(): # f_r.write(relation + \" \" + str(idx)) # f_r.write('\\n')", "token graphs, combine all result...') token2datas = {} for i in range(PROCESSES): with", "\": continue # add nodes if head not in nodes: nodes.append(head) if tail", "head = del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue if", "logger.info(\"For all KG, {}/{} retrieved entities used, {}/{} retrieved relations used.\".format( len(entity2emb), len(entity_emb),", "\" \": logger.info(\"empty entity: {}\".format(entity)) f.write(entity + \" \" + \" \".join(map(str, emb))", "tail == \"\" or tail == \" \": continue # add nodes if", "del_pos(s): \"\"\" Deletes part-of-speech encoding from an entity string, if present. 
:param s:", "list.') # mk directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive neighbor triples and", "edges, edges_attr, token_triples = build_graph(contained_triples) return nodes, edges, edges_attr, token_triples, core_entity def retrieve_tokens_graph(index,", "len(emb) == 100 if entity == \"\" or entity == \" \": logger.info(\"empty", "== \" \": continue if tail == \"\" or tail == \" \":", "type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords') parser.add_argument('--ignore_length', type=int, default=0, help='ignore", "open(relation_path, 'w') as f_r: # for relation, idx in relation2id.items(): # f_r.write(relation +", "ls[3].startswith('/c/en/'): \"\"\" Some preprocessing: - Remove part-of-speech encoding. - Split(\"/\")[-1] to trim the", "present. :param s: Entity string. :return: Entity string with part-of-speech encoding removed. 
\"\"\"", "for line in f.readlines(): ls = line.split(\" \") # pass first line if", "tail.split(\"_\"): core_entitys.add(tail) # define core entity, choose the shortest core_entitys = list(core_entitys) if", "= cur_triple[0], cur_triple[1], cur_triple[2] for triple in conceptnet_triples: if triple == cur_triple: continue", "-*- ''' @File : retrieve_1hop.py @Author : yyhaker @Contact : <EMAIL> @Time :", "# add edge edges.append([head, tail]) edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple) assert len(edges) ==", "\" + str(idx)) # f_r.write('\\n') # id2entity = {v:k for k,v in entity2id.items()}", "\"\" or head == \" \": continue if tail == \"\" or tail", "open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin: token2data = pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all results done!\")", "as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs {}'.format(index)) def del_pos(s): \"\"\"", "<= ignore_length') args = parser.parse_args() # load ConceptNet here logger.info(\"Begin loading concept triples...\")", "get concept mapping logger.info(\"get concept mapping...\") entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path) # load", "core_entity = build_graph_for_token(token, conceptnet_triples) token2data = {} token2data[\"sub_graph\"] = (nodes, edges, edges_attr) token2data[\"graph_triples\"]", "triples\"\"\" # entity2id = {} # relation2id = {} # for triple in", "in entity2id.keys(): # entity2id[head] = len(entity2id) # if tail not in entity2id.keys(): #", "def search_triples(token, conceptnet_triples, limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = [] core_entitys = set() # search", "results done!\") logger.info('{} / {} tokens retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set))) with", "token2data = {} token2data[\"sub_graph\"] = (nodes, edges, edges_attr) 
token2data[\"graph_triples\"] = token_triples token2data[\"core_entity\"] =", "'rb')) dev_samples = pickle.load(open(args.eval_token, 'rb')) logger.info('Finished loading tokenization results.') # build token set", "def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb): \"\"\"retrieve entity and relation embeddings\"\"\" entity2emb =", "core_entity token2datas[token] = token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished", "uniformity. \"\"\" rel = ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower() if not", "os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive neighbor triples and build sub-graph logger.info('Begin to retrieve neighbor", "stopwords list.') # mk directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive neighbor triples", "!= 0: min_len = len(core_entitys[0]) min_entity = core_entitys[0] for entity in core_entitys: if", "with open(args.entity2emb_path, 'w', encoding='utf-8') as f: for entity, emb in entity2emb.items(): assert len(emb)", "len(ls) <= 1: continue entity = ls[0].strip() idx = int(ls[1].strip()) entity2id[entity] = idx", "type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of dev set') parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path')", "= core_entitys[0] for entity in core_entitys: if len(entity) < min_len: min_len = len(entity)", "all result...') token2datas = {} for i in range(PROCESSES): with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb')", "16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph (1) 检索出头或者尾部包含该词的三元组,构建子图G 2. sub-graph triples 3. 
core_entity", "import logging import string from tqdm import tqdm from nltk.corpus import wordnet as", "assert len(edges) == len(edges_attr) return nodes, edges, edges_attr, token_triples def build_graph_for_token(token, conceptnet_triples): \"\"\"根据给定的token,构建子图\"\"\"", "entity2id = {} relation2id = {} with open(entity_path, 'r', encoding=\"utf-8\") as f: for", "mapping logger.info(\"get concept mapping...\") entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path) # load pickled samples", "yyhaker @Contact : <EMAIL> @Time : 2020/04/07 16:33:58 ''' \"\"\" 检索知识图谱:对于某个token,分别检索出三部分: 1. sub-graph", "in graph_triples: head, rel, tail = triple[0], triple[1], triple[2] if head not in", "open(args.entity2emb_path, 'w', encoding='utf-8') as f: for entity, emb in entity2emb.items(): assert len(emb) ==", "\"\"\"build mapping of entities and triples\"\"\" # entity2id = {} # relation2id =", "i, part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,)) p.close() p.join() logger.info(\"all", "# multi-processing logger.info(\"Begin to deal with {} processes...\".format(PROCESSES)) p = Pool(PROCESSES) for i,", "used, {}/{} retrieved relations used.\".format( len(entity2emb), len(entity_emb), len(relation2emb), len(relation_emb))) if __name__ == '__main__':", "at lease 1 graph.'.format(len(token2datas), len(all_token_set))) with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout: pickle.dump(token2datas, fout)", "open(args.relation2emb_path, 'w', encoding=\"utf-8\") as f: for rel, emb in relation2emb.items(): assert len(emb) ==", "cur_triple[1], cur_triple[2] for triple in conceptnet_triples: if triple == cur_triple: continue head, rel,", "with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout: pickle.dump(token2datas, fout) logger.info('Finished dumping retrieved token graphs.')", "relation2id = {} with 
open(entity_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines():", "encoding removed. \"\"\" if s.endswith(\"/n\") or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"): s =", "range(PROCESSES): if i != PROCESSES - 1: cur_token_set = all_token_set[i * part_token_nums: (i+1)", "- Lowercase for uniformity. \"\"\" rel = ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower() tail =", "# build graph for token here nodes, edges, edges_attr, token_triples, core_entity = build_graph_for_token(token,", "token here nodes, edges, edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples) token2data = {}", "rel, tail = triple[0], triple[1], triple[2] if head not in entity2emb: entity2emb[head] =", "f_e.write('\\n') # with open(relation_path, 'w') as f_r: # for relation, idx in relation2id.items():", "p = Pool(PROCESSES) for i, part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords,", "0 # punctuation_cnt = 0 all_token_set = list(all_token_set) # split all_token_set to processes", "tail = triple[0], triple[1], triple[2] if cur_head == head or cur_head == tail", "to - Lowercase for uniformity. \"\"\" rel = ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower() tail", "if present. :param s: Entity string. :return: Entity string with part-of-speech encoding removed.", "logger.info(\"Begin to deal with {} processes...\".format(PROCESSES)) p = Pool(PROCESSES) for i, part in", "enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,)) p.close() p.join() logger.info(\"all processes done!\") #", "done!\") # combine all results logger.info('Finished retrieving token graphs, combine all result...') token2datas", "just get the entity name, convert all to - Lowercase for uniformity. \"\"\"", "sub-graph triples 3. 
core_entity \"\"\" import sys sys.path.append(\".\") import random import pickle import", "len(ls) <= 1: continue rel = ls[0].strip() idx = int(ls[1].strip()) relation2id[rel] = idx", "in range(PROCESSES): with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin: token2data = pickle.load(fin) token2datas.update(token2data) logger.info(\"combine", "token in head.split(\"_\"): core_entitys.add(head) if token in tail.split(\"_\"): core_entitys.add(tail) # define core entity,", "head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue triple = (head,", "(head, rel, tail) en_triples.append(triple) return en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples = [] with", "mapping of entities and triples\"\"\" # entity2id = {} # relation2id = {}", "if tail not in nodes: nodes.append(tail) # add edge edges.append([head, tail]) edges.append([tail, head])", "k,v in relation2id.items()} # return entity2id, id2entity, relation2id, id2relation def get_concept_mapping(entity_path, relation_path): \"\"\"read", "edges_attr = [] token_triples = [] for triple in triples: head, rel, tail", "set(string.punctuation): logger.info('{} is punctuation, skipped!'.format(token)) # punctuation_cnt += 1 continue if args.no_stopwords and", "relation2id.items(): # f_r.write(relation + \" \" + str(idx)) # f_r.write('\\n') # id2entity =", "open(conceptnet_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines(): ls = line.split('\\t') if", "if args.no_stopwords and token in stopwords: logger.info('{} is stopword, skipped!'.format(token)) # stopword_cnt +=", "s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"): s = s[:-2] return s def retrieved_entity_rel_emb(token2datas, entity2id,", "token2datas[token] = token2data with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as 
fout: pickle.dump(token2datas, fout) logger.info('Finished dumping", "= set(nltk.corpus.stopwords.words('english')) logger.info('Finished loading stopwords list.') # mk directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir)", "triple = random.choice(conceptnet_triples) logger.info(triple) # # build mappings of entities and relations(all ConceptNet)", "== 100 f.write(rel + \" \" + \" \".join(map(str, emb)) + \"\\n\") logger.info(\"For", "len(entity2id) # if tail not in entity2id.keys(): # entity2id[tail] = len(entity2id) # if", "token set all_token_set = set() for sample in train_samples + dev_samples: for token", "\"\"\"检索出三元组的相邻的三元组\"\"\" neighbor_triples = [] cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1], cur_triple[2] for triple", "help=\"entity2emb path\") parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path') parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory') parser.add_argument('--no_stopwords',", "help='ignore words with length <= ignore_length') args = parser.parse_args() # load ConceptNet here", "%(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__)", "<= args.ignore_length: logger.info('{} is too short, skipped!'.format(token)) continue # build graph for token", "logging.getLogger(__name__) PROCESSES = 60 def extract_en_triples(conceptnet_path): \"\"\"检索出所有英文的三元组\"\"\" en_triples = [] with open(conceptnet_path, 'r',", "or s.endswith(\"/a\") or s.endswith(\"/v\") or s.endswith(\"/r\"): s = s[:-2] return s def retrieved_entity_rel_emb(token2datas,", "default=0, help='ignore words with length <= ignore_length') args = parser.parse_args() # load ConceptNet", "return nodes, edges, edges_attr, token_triples, core_entity def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, 
args): \"\"\"retrieve", "token in stopwords: logger.info('{} is stopword, skipped!'.format(token)) # stopword_cnt += 1 continue if", "logger.info(\"begin run function {} at process {}\".format(retrieve_tokens_graph, os.getpid())) token2datas = {} for token", "triple = (head, rel, tail) en_triples.append(triple) return en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\" conceptnet_triples =", "return s def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb): \"\"\"retrieve entity and relation embeddings\"\"\"", "\"\").isalpha(): continue triple = (head, rel, tail) en_triples.append(triple) return en_triples def extract_triples(conceptnet_path): \"\"\"检索出conceptnet中的三元组\"\"\"", "processes parts and deal with multi-processing all_token_parts = [] part_token_nums = int(len(all_token_set) /", "logger.info('Finished dumping retrieved token graphs {}'.format(index)) def del_pos(s): \"\"\" Deletes part-of-speech encoding from", "entity2id[entity] = idx with open(relation_path, 'r', encoding=\"utf-8\") as f: for line in f.readlines():", "part-of-speech encoding from an entity string, if present. :param s: Entity string. :return:", "string, if present. :param s: Entity string. :return: Entity string with part-of-speech encoding", "convert all to - Lowercase for uniformity. 
\"\"\" rel = ls[1].split(\"/\")[-1].lower() head =", "in nodes: nodes.append(tail) # add edge edges.append([head, tail]) edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple)", "ls[1].split(\"/\")[-1].lower() head = del_pos(ls[2]).split(\"/\")[-1].lower() tail = del_pos(ls[3]).split(\"/\")[-1].lower() if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha(): continue", "limit=20): \"\"\"检索出头或者尾部包含该词的三元组\"\"\" triples = [] core_entitys = set() # search triples for triple", "== tail: neighbor_triples.append(triple) return neighbor_triples def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x : [num_nodes,", "token2datas.update(token2data) logger.info(\"combine all results done!\") logger.info('{} / {} tokens retrieved at lease 1", "open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in: # token2datas = pickle.load(f_in) logger.info(\"save retrieved entity and", "f_e: # for entity, idx in entity2id.items(): # f_e.write(entity + \" \" +", "'%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) PROCESSES = 60 def extract_en_triples(conceptnet_path):", "] all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin to deal with {} processes...\".format(PROCESSES)) p = Pool(PROCESSES)", "line in f.readlines(): ls = line.split(\",\") head = ls[0].strip() rel = ls[1].strip() tail", "* part_token_nums] else: cur_token_set = all_token_set[i * part_token_nums: ] all_token_parts.append(cur_token_set) # multi-processing logger.info(\"Begin", "tail not in entity2emb: entity2emb[tail] = entity_emb[entity2id[tail]] return entity2emb, relation2emb def main(): parser", "triple[1], triple[2] # if head not in entity2id.keys(): # entity2id[head] = len(entity2id) #", "deal with {} processes...\".format(PROCESSES)) p = Pool(PROCESSES) for i, part in enumerate(all_token_parts): p.apply_async(retrieve_tokens_graph,", "if token in tail.split(\"_\"): 
core_entitys.add(tail) # define core entity, choose the shortest core_entitys", "= entity_emb[entity2id[head]] if rel not in relation2emb: relation2emb[rel] = relation_emb[relation2id[rel]] if tail not", "# if rel not in relation2id.keys(): # relation2id[rel] = len(relation2id) # with open(entity_path,", "fin: token2data = pickle.load(fin) token2datas.update(token2data) logger.info(\"combine all results done!\") logger.info('{} / {} tokens", "os.getpid())) token2datas = {} for token in tqdm(token_part): if token in set(string.punctuation): logger.info('{}", "contained_triples, core_entity = search_triples(token, conceptnet_triples) nodes, edges, edges_attr, token_triples = build_graph(contained_triples) return nodes,", "= line.split('\\t') if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'): \"\"\" Some preprocessing: - Remove part-of-speech encoding.", "triples 3. core_entity \"\"\" import sys sys.path.append(\".\") import random import pickle import argparse", "help='token file of train set') parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data', help='token file of dev set')", "emb)) + \"\\n\") logger.info(\"For all KG, {}/{} retrieved entities used, {}/{} retrieved relations", "or cur_tail == tail: neighbor_triples.append(triple) return neighbor_triples def build_graph(triples): \"\"\"连接相同的实体构建子图, 返回子图G\"\"\" # x", "# for relation, idx in relation2id.items(): # f_r.write(relation + \" \" + str(idx))", "logger.info('{} is stopword, skipped!'.format(token)) # stopword_cnt += 1 continue if args.ignore_length > 0", "pickle.load(f_in) logger.info(\"save retrieved entity and relation embeddings...\") with open(args.entity_emb_path, 'rb') as f1: entity_emb", "directory if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # retrive neighbor triples and build sub-graph logger.info('Begin", "concept english triples.') logger.info(\"sample five triples...\") for i in 
range(5): triple = random.choice(conceptnet_triples)", "of relations and entities.\") # get concept mapping logger.info(\"get concept mapping...\") entity2id, relation2id", "sample['document_tokens']: all_token_set.add(token) logger.info('Finished making tokenization results into token set.') # load stopwords stopwords", "+ \" \".join(map(str, emb)) + \"\\n\") logger.info(\"For all KG, {}/{} retrieved entities used,", "str(idx)) # f_e.write('\\n') # with open(relation_path, 'w') as f_r: # for relation, idx", "cur_token_set = all_token_set[i * part_token_nums: (i+1) * part_token_nums] else: cur_token_set = all_token_set[i *", "edge_attr : [num_edges, num_edge_features] nodes = [] edges = [] edges_attr = []", "is stopword, skipped!'.format(token)) # stopword_cnt += 1 continue if args.ignore_length > 0 and", "{} for i in range(PROCESSES): with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin: token2data =", "nodes.append(tail) # add edge edges.append([head, tail]) edges.append([tail, head]) edges_attr.append(rel) edges_attr.append(rel) token_triples.append(triple) assert len(edges)", "entity2id[tail] = len(entity2id) # if rel not in relation2id.keys(): # relation2id[rel] = len(relation2id)", "token_triples.append(triple) assert len(edges) == len(edges_attr) return nodes, edges, edges_attr, token_triples def build_graph_for_token(token, conceptnet_triples):", "num_edge_features] nodes = [] edges = [] edges_attr = [] token_triples = []" ]
[ "f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}' def get_file(self) -> File: image = ContentFile(base64.b64decode(self.image_str), name=self.default_name) return", "list: fauthImage = FauthImage(base64String) # image_path = fauthImage.get_path() # image dir image_file =", "FauthImage(base64String) # image_path = fauthImage.get_path() # image dir image_file = fauthImage.get_file() # image", "= face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image) # No face found in unknown image if", "<gh_stars>0 import base64 import face_recognition from django.core.files.base import ContentFile from django.core.files import File", "def __init__(self, dataURI: str, *, name: str = 'temp'): self.image_uri = dataURI self.image_format,", "get_file(self) -> File: image = ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image) def get_path(self) -> str:", "second_image_encodings[0], 0.4), 'message': '' } return { 'result': [], 'message': 'No face detected!'", "face_locations def compare_faces(image1, image2) -> dict: first_image = face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image) second_image", "face_recognition from django.core.files.base import ContentFile from django.core.files import File from django.conf import settings", "str) -> list: fauthImage = FauthImage(base64String) # image_path = fauthImage.get_path() # image dir", "-> list: fauthImage = FauthImage(base64String) # image_path = fauthImage.get_path() # image dir image_file", "str: try: file = open(self.image_dir, 'xb') except FileExistsError: file = open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str))", "# image file object image = face_recognition.load_image_file(image_file) face_locations = face_recognition.face_locations(image) return face_locations def", "image file object image = face_recognition.load_image_file(image_file) 
face_locations = face_recognition.face_locations(image) return face_locations def compare_faces(image1,", "self.image_ext = self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}' def get_file(self)", "self.image_dir = settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}' def get_file(self) -> File: image", "in unknown image if second_image_encodings and first_image_encodings: return { 'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4),", "dir image_file = fauthImage.get_file() # image file object image = face_recognition.load_image_file(image_file) face_locations =", "self.image_uri = dataURI self.image_format, self.image_str = dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR", "str, *, name: str = 'temp'): self.image_uri = dataURI self.image_format, self.image_str = dataURI.split(';base64,')", "FauthImage: def __init__(self, dataURI: str, *, name: str = 'temp'): self.image_uri = dataURI", "return face_locations def compare_faces(image1, image2) -> dict: first_image = face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image)", "FileExistsError: file = open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close() return self.image_dir def get_face_locations_from_base64(base64String: str) ->", "self.default_name = f'{name}.{self.image_ext}' def get_file(self) -> File: image = ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image)", "'wb') file.write(base64.b64decode(self.image_str)) file.close() return self.image_dir def get_face_locations_from_base64(base64String: str) -> list: fauthImage = FauthImage(base64String)", "name=self.default_name) return File(image) def get_path(self) -> str: try: file = 
open(self.image_dir, 'xb') except", "dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}' def", "ContentFile from django.core.files import File from django.conf import settings class FauthImage: def __init__(self,", "def get_path(self) -> str: try: file = open(self.image_dir, 'xb') except FileExistsError: file =", "No face found in unknown image if second_image_encodings and first_image_encodings: return { 'result':", "django.core.files import File from django.conf import settings class FauthImage: def __init__(self, dataURI: str,", "image if second_image_encodings and first_image_encodings: return { 'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message': ''", "found in unknown image if second_image_encodings and first_image_encodings: return { 'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0],", "django.conf import settings class FauthImage: def __init__(self, dataURI: str, *, name: str =", "dict: first_image = face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image)", "# image_path = fauthImage.get_path() # image dir image_file = fauthImage.get_file() # image file", "= face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image) # No", "{ 'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message': '' } return { 'result': [], 'message':", "= open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close() 
return self.image_dir def get_face_locations_from_base64(base64String: str) -> list: fauthImage", "face_locations = face_recognition.face_locations(image) return face_locations def compare_faces(image1, image2) -> dict: first_image = face_recognition.load_image_file(image1)", "image_path = fauthImage.get_path() # image dir image_file = fauthImage.get_file() # image file object", "self.image_format, self.image_str = dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name", "File(image) def get_path(self) -> str: try: file = open(self.image_dir, 'xb') except FileExistsError: file", "name: str = 'temp'): self.image_uri = dataURI self.image_format, self.image_str = dataURI.split(';base64,') self.image_ext =", "settings class FauthImage: def __init__(self, dataURI: str, *, name: str = 'temp'): self.image_uri", "face_recognition.face_locations(image) return face_locations def compare_faces(image1, image2) -> dict: first_image = face_recognition.load_image_file(image1) first_image_encodings =", "settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}' def get_file(self) -> File: image = ContentFile(base64.b64decode(self.image_str),", "= fauthImage.get_file() # image file object image = face_recognition.load_image_file(image_file) face_locations = face_recognition.face_locations(image) return", "-> str: try: file = open(self.image_dir, 'xb') except FileExistsError: file = open(self.image_dir, 'wb')", "def get_face_locations_from_base64(base64String: str) -> list: fauthImage = FauthImage(base64String) # image_path = fauthImage.get_path() #", "return self.image_dir def get_face_locations_from_base64(base64String: str) -> list: fauthImage = FauthImage(base64String) # image_path =", "__init__(self, dataURI: str, *, name: str = 'temp'): self.image_uri = dataURI self.image_format, self.image_str", "file = open(self.image_dir, 
'xb') except FileExistsError: file = open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close() return", "unknown image if second_image_encodings and first_image_encodings: return { 'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message':", "first_image_encodings: return { 'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message': '' } return { 'result':", "*, name: str = 'temp'): self.image_uri = dataURI self.image_format, self.image_str = dataURI.split(';base64,') self.image_ext", "= open(self.image_dir, 'xb') except FileExistsError: file = open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close() return self.image_dir", "dataURI: str, *, name: str = 'temp'): self.image_uri = dataURI self.image_format, self.image_str =", "= dataURI self.image_format, self.image_str = dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR +", "file.close() return self.image_dir def get_face_locations_from_base64(base64String: str) -> list: fauthImage = FauthImage(base64String) # image_path", "open(self.image_dir, 'xb') except FileExistsError: file = open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close() return self.image_dir def", "def compare_faces(image1, image2) -> dict: first_image = face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image) second_image =", "face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image) # No face", "fauthImage.get_path() # image dir image_file = fauthImage.get_file() # image file object image =", "file object image = face_recognition.load_image_file(image_file) 
face_locations = face_recognition.face_locations(image) return face_locations def compare_faces(image1, image2)", "face_recognition.load_image_file(image_file) face_locations = face_recognition.face_locations(image) return face_locations def compare_faces(image1, image2) -> dict: first_image =", "first_image = face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image) #", "= 'temp'): self.image_uri = dataURI self.image_format, self.image_str = dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1] self.image_dir", "face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message': '' } return { 'result': [], 'message': 'No face", "-> File: image = ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image) def get_path(self) -> str: try:", "= face_recognition.load_image_file(image_file) face_locations = face_recognition.face_locations(image) return face_locations def compare_faces(image1, image2) -> dict: first_image", "= face_recognition.face_encodings(second_image) # No face found in unknown image if second_image_encodings and first_image_encodings:", "import File from django.conf import settings class FauthImage: def __init__(self, dataURI: str, *,", "0.4), 'message': '' } return { 'result': [], 'message': 'No face detected!' 
}", "file.write(base64.b64decode(self.image_str)) file.close() return self.image_dir def get_face_locations_from_base64(base64String: str) -> list: fauthImage = FauthImage(base64String) #", "'temp'): self.image_uri = dataURI self.image_format, self.image_str = dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1] self.image_dir =", "= self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}' def get_file(self) ->", "self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}' def get_file(self) -> File:", "= ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image) def get_path(self) -> str: try: file = open(self.image_dir,", "return File(image) def get_path(self) -> str: try: file = open(self.image_dir, 'xb') except FileExistsError:", "get_face_locations_from_base64(base64String: str) -> list: fauthImage = FauthImage(base64String) # image_path = fauthImage.get_path() # image", "import base64 import face_recognition from django.core.files.base import ContentFile from django.core.files import File from", "if second_image_encodings and first_image_encodings: return { 'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message': '' }", "face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image) # No face found in unknown", "open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close() return self.image_dir def get_face_locations_from_base64(base64String: str) -> list: fauthImage =", "dataURI self.image_format, self.image_str = dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR + 
f'/image.{self.image_ext}'", "= fauthImage.get_path() # image dir image_file = fauthImage.get_file() # image file object image", "file = open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close() return self.image_dir def get_face_locations_from_base64(base64String: str) -> list:", "# No face found in unknown image if second_image_encodings and first_image_encodings: return {", "from django.conf import settings class FauthImage: def __init__(self, dataURI: str, *, name: str", "= f'{name}.{self.image_ext}' def get_file(self) -> File: image = ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image) def", "= FauthImage(base64String) # image_path = fauthImage.get_path() # image dir image_file = fauthImage.get_file() #", "get_path(self) -> str: try: file = open(self.image_dir, 'xb') except FileExistsError: file = open(self.image_dir,", "File from django.conf import settings class FauthImage: def __init__(self, dataURI: str, *, name:", "fauthImage.get_file() # image file object image = face_recognition.load_image_file(image_file) face_locations = face_recognition.face_locations(image) return face_locations", "and first_image_encodings: return { 'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message': '' } return {", "str = 'temp'): self.image_uri = dataURI self.image_format, self.image_str = dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1]", "image = ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image) def get_path(self) -> str: try: file =", "except FileExistsError: file = open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close() return self.image_dir def get_face_locations_from_base64(base64String: str)", "second_image = face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image) # No face found in unknown image", 
"image = face_recognition.load_image_file(image_file) face_locations = face_recognition.face_locations(image) return face_locations def compare_faces(image1, image2) -> dict:", "second_image_encodings = face_recognition.face_encodings(second_image) # No face found in unknown image if second_image_encodings and", "f'{name}.{self.image_ext}' def get_file(self) -> File: image = ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image) def get_path(self)", "try: file = open(self.image_dir, 'xb') except FileExistsError: file = open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close()", "image_file = fauthImage.get_file() # image file object image = face_recognition.load_image_file(image_file) face_locations = face_recognition.face_locations(image)", "from django.core.files.base import ContentFile from django.core.files import File from django.conf import settings class", "= dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}'", "+ f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}' def get_file(self) -> File: image = ContentFile(base64.b64decode(self.image_str), name=self.default_name)", "= face_recognition.face_locations(image) return face_locations def compare_faces(image1, image2) -> dict: first_image = face_recognition.load_image_file(image1) first_image_encodings", "face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image) # No face found in unknown image if second_image_encodings", "from django.core.files import File from django.conf import settings class FauthImage: def __init__(self, dataURI:", "object image = face_recognition.load_image_file(image_file) face_locations = face_recognition.face_locations(image) return face_locations def compare_faces(image1, image2) ->", "return { 'result': 
face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message': '' } return { 'result': [],", "-> dict: first_image = face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2) second_image_encodings =", "face found in unknown image if second_image_encodings and first_image_encodings: return { 'result': face_recognition.compare_faces([first_image_encodings[0]],", "fauthImage = FauthImage(base64String) # image_path = fauthImage.get_path() # image dir image_file = fauthImage.get_file()", "ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image) def get_path(self) -> str: try: file = open(self.image_dir, 'xb')", "# image dir image_file = fauthImage.get_file() # image file object image = face_recognition.load_image_file(image_file)", "self.image_str = dataURI.split(';base64,') self.image_ext = self.image_format.split('/')[-1] self.image_dir = settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name =", "image dir image_file = fauthImage.get_file() # image file object image = face_recognition.load_image_file(image_file) face_locations", "base64 import face_recognition from django.core.files.base import ContentFile from django.core.files import File from django.conf", "import settings class FauthImage: def __init__(self, dataURI: str, *, name: str = 'temp'):", "import face_recognition from django.core.files.base import ContentFile from django.core.files import File from django.conf import", "compare_faces(image1, image2) -> dict: first_image = face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2)", "def get_file(self) -> File: image = ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image) def get_path(self) ->", "= 
face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image) # No face found in", "first_image_encodings = face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2) second_image_encodings = face_recognition.face_encodings(second_image) # No face found", "import ContentFile from django.core.files import File from django.conf import settings class FauthImage: def", "'xb') except FileExistsError: file = open(self.image_dir, 'wb') file.write(base64.b64decode(self.image_str)) file.close() return self.image_dir def get_face_locations_from_base64(base64String:", "'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message': '' } return { 'result': [], 'message': 'No", "= settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}' self.default_name = f'{name}.{self.image_ext}' def get_file(self) -> File: image =", "image2) -> dict: first_image = face_recognition.load_image_file(image1) first_image_encodings = face_recognition.face_encodings(first_image) second_image = face_recognition.load_image_file(image2) second_image_encodings", "second_image_encodings and first_image_encodings: return { 'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4), 'message': '' } return", "class FauthImage: def __init__(self, dataURI: str, *, name: str = 'temp'): self.image_uri =", "django.core.files.base import ContentFile from django.core.files import File from django.conf import settings class FauthImage:", "File: image = ContentFile(base64.b64decode(self.image_str), name=self.default_name) return File(image) def get_path(self) -> str: try: file", "face_recognition.face_encodings(second_image) # No face found in unknown image if second_image_encodings and first_image_encodings: return", "self.image_dir def 
get_face_locations_from_base64(base64String: str) -> list: fauthImage = FauthImage(base64String) # image_path = fauthImage.get_path()" ]
[ "return json_str def json_to_xml(json_str): \"\"\" :param json_str: :return: \"\"\" xml_str = xmltodict.unparse(json_str, pretty=1)", "for index, a in enumerate(b): if a == None: del v[index] if isinstance(request_param,", "del request_param[k] if isinstance(v, list): b = v.copy() for index, a in enumerate(b):", "\"\"\" :param xml_str: :return: \"\"\" xml_parse = xmltodict.parse(xml_str) json_str = json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\')", "if isinstance(v, list): b = v.copy() for index, a in enumerate(b): if a", "from json import JSONDecodeError import xmltodict from common.common_response_code import response_code def xml_to_json(xml_str): \"\"\"", "indent=1).replace('\\\\', '\\\\\\\\') return json_str def json_to_xml(json_str): \"\"\" :param json_str: :return: \"\"\" xml_str =", "k, v in c.items(): if v == None: del request_param[k] if isinstance(v, list):", "request_param[k] if isinstance(v, list): b = v.copy() for index, a in enumerate(b): if", "json from json import JSONDecodeError import xmltodict from common.common_response_code import response_code def xml_to_json(xml_str):", "None: del b[index] else: c = a.copy() for k, v in c.items(): if", "c.items(): if v == None: del request_param[k] if isinstance(v, list): b = v.copy()", "v == None: del request_param[k] if isinstance(v, list): b = v.copy() for index,", "None: del a[k] if isinstance(v, list): b = v.copy() for index, a in", "isinstance(request_param, dict): c = request_param.copy() for k, v in c.items(): if v ==", "= a.copy() for k, v in c.items(): if v == None: del a[k]", "coding: UTF-8 -* import json from json import JSONDecodeError import xmltodict from common.common_response_code", "request_param: :return: \"\"\" if isinstance(request_param, list): for index, a in enumerate(request_param): if isinstance(a,", "xml_str: :return: \"\"\" xml_parse = xmltodict.parse(xml_str) json_str = json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return json_str", ":param 
request_param: :return: \"\"\" if isinstance(request_param, list): for index, a in enumerate(request_param): if", "== None: del a[k] if isinstance(v, list): b = v.copy() for index, a", "if v == None: del a[k] if isinstance(v, list): b = v.copy() for", "-* import json from json import JSONDecodeError import xmltodict from common.common_response_code import response_code", "JSONDecodeError import xmltodict from common.common_response_code import response_code def xml_to_json(xml_str): \"\"\" :param xml_str: :return:", "list): for index, a in enumerate(request_param): if isinstance(a, str): b = request_param.copy() if", "def is_none(request_param): \"\"\" :param request_param: :return: \"\"\" if isinstance(request_param, list): for index, a", "= v.copy() for index, a in enumerate(b): if a == None: del v[index]", "if a == None: del v[index] if isinstance(request_param, dict): c = request_param.copy() for", "b = v.copy() for index, a in enumerate(b): if a == None: del", "c = a.copy() for k, v in c.items(): if v == None: del", "json_str def json_to_xml(json_str): \"\"\" :param json_str: :return: \"\"\" xml_str = xmltodict.unparse(json_str, pretty=1) return", "if v == None: del request_param[k] if isinstance(v, list): b = v.copy() for", "\"\"\" xml_parse = xmltodict.parse(xml_str) json_str = json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return json_str def json_to_xml(json_str):", "in c.items(): if v == None: del a[k] if isinstance(v, list): b =", "v == None: del a[k] if isinstance(v, list): b = v.copy() for index,", "for index, a in enumerate(request_param): if isinstance(a, str): b = request_param.copy() if a", ":param json_str: :return: \"\"\" xml_str = xmltodict.unparse(json_str, pretty=1) return xml_str def is_none(request_param): \"\"\"", "<filename>engine/utils/xml_json_process.py #!/usr/bin/python # -*- coding: UTF-8 -* import json from json import JSONDecodeError", "\"\"\" :param json_str: :return: \"\"\" xml_str = xmltodict.unparse(json_str, 
pretty=1) return xml_str def is_none(request_param):", "v in c.items(): if v == None: del a[k] if isinstance(v, list): b", "list): b = v.copy() for index, a in enumerate(b): if a == None:", "response_code def xml_to_json(xml_str): \"\"\" :param xml_str: :return: \"\"\" xml_parse = xmltodict.parse(xml_str) json_str =", "xml_str def is_none(request_param): \"\"\" :param request_param: :return: \"\"\" if isinstance(request_param, list): for index,", "== None: del request_param[k] if isinstance(v, list): b = v.copy() for index, a", "if a == None: del b[index] else: c = a.copy() for k, v", "a == None: del v[index] if isinstance(request_param, dict): c = request_param.copy() for k,", "pretty=1) return xml_str def is_none(request_param): \"\"\" :param request_param: :return: \"\"\" if isinstance(request_param, list):", "isinstance(v, list): b = v.copy() for index, a in enumerate(b): if a ==", "for k, v in c.items(): if v == None: del request_param[k] if isinstance(v,", "None: del request_param[k] if isinstance(v, list): b = v.copy() for index, a in", "if isinstance(a, str): b = request_param.copy() if a == None: del b[index] else:", "'\\\\\\\\') return json_str def json_to_xml(json_str): \"\"\" :param json_str: :return: \"\"\" xml_str = xmltodict.unparse(json_str,", ":return: \"\"\" xml_parse = xmltodict.parse(xml_str) json_str = json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return json_str def", "import response_code def xml_to_json(xml_str): \"\"\" :param xml_str: :return: \"\"\" xml_parse = xmltodict.parse(xml_str) json_str", "xml_to_json(xml_str): \"\"\" :param xml_str: :return: \"\"\" xml_parse = xmltodict.parse(xml_str) json_str = json.dumps(xml_parse, indent=1).replace('\\\\',", "c = request_param.copy() for k, v in c.items(): if v == None: del", "import JSONDecodeError import xmltodict from common.common_response_code import response_code def xml_to_json(xml_str): \"\"\" :param xml_str:", "k, v in c.items(): if v == None: del a[k] if isinstance(v, 
list):", "for index, a in enumerate(b): if a == None: del v[index] return request_param", "c.items(): if v == None: del a[k] if isinstance(v, list): b = v.copy()", "= json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return json_str def json_to_xml(json_str): \"\"\" :param json_str: :return: \"\"\"", "def json_to_xml(json_str): \"\"\" :param json_str: :return: \"\"\" xml_str = xmltodict.unparse(json_str, pretty=1) return xml_str", "else: c = a.copy() for k, v in c.items(): if v == None:", "enumerate(request_param): if isinstance(a, str): b = request_param.copy() if a == None: del b[index]", "a in enumerate(request_param): if isinstance(a, str): b = request_param.copy() if a == None:", "in enumerate(request_param): if isinstance(a, str): b = request_param.copy() if a == None: del", "is_none(request_param): \"\"\" :param request_param: :return: \"\"\" if isinstance(request_param, list): for index, a in", "a.copy() for k, v in c.items(): if v == None: del a[k] if", "a in enumerate(b): if a == None: del v[index] if isinstance(request_param, dict): c", "b[index] else: c = a.copy() for k, v in c.items(): if v ==", "dict): c = request_param.copy() for k, v in c.items(): if v == None:", "xmltodict.parse(xml_str) json_str = json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return json_str def json_to_xml(json_str): \"\"\" :param json_str:", ":return: \"\"\" if isinstance(request_param, list): for index, a in enumerate(request_param): if isinstance(a, str):", "= request_param.copy() for k, v in c.items(): if v == None: del request_param[k]", "== None: del v[index] if isinstance(request_param, dict): c = request_param.copy() for k, v", "index, a in enumerate(b): if a == None: del v[index] if isinstance(request_param, dict):", "\"\"\" xml_str = xmltodict.unparse(json_str, pretty=1) return xml_str def is_none(request_param): \"\"\" :param request_param: :return:", "json_str: :return: \"\"\" xml_str = xmltodict.unparse(json_str, pretty=1) return xml_str def 
is_none(request_param): \"\"\" :param", "= request_param.copy() if a == None: del b[index] else: c = a.copy() for", "xml_str = xmltodict.unparse(json_str, pretty=1) return xml_str def is_none(request_param): \"\"\" :param request_param: :return: \"\"\"", "json import JSONDecodeError import xmltodict from common.common_response_code import response_code def xml_to_json(xml_str): \"\"\" :param", "index, a in enumerate(request_param): if isinstance(a, str): b = request_param.copy() if a ==", ":return: \"\"\" xml_str = xmltodict.unparse(json_str, pretty=1) return xml_str def is_none(request_param): \"\"\" :param request_param:", "import xmltodict from common.common_response_code import response_code def xml_to_json(xml_str): \"\"\" :param xml_str: :return: \"\"\"", "enumerate(b): if a == None: del v[index] if isinstance(request_param, dict): c = request_param.copy()", "if isinstance(request_param, list): for index, a in enumerate(request_param): if isinstance(a, str): b =", "b = request_param.copy() if a == None: del b[index] else: c = a.copy()", "xmltodict from common.common_response_code import response_code def xml_to_json(xml_str): \"\"\" :param xml_str: :return: \"\"\" xml_parse", "== None: del b[index] else: c = a.copy() for k, v in c.items():", "in enumerate(b): if a == None: del v[index] if isinstance(request_param, dict): c =", "\"\"\" if isinstance(request_param, list): for index, a in enumerate(request_param): if isinstance(a, str): b", "del a[k] if isinstance(v, list): b = v.copy() for index, a in enumerate(b):", "a[k] if isinstance(v, list): b = v.copy() for index, a in enumerate(b): if", "del b[index] else: c = a.copy() for k, v in c.items(): if v", "from common.common_response_code import response_code def xml_to_json(xml_str): \"\"\" :param xml_str: :return: \"\"\" xml_parse =", "#!/usr/bin/python # -*- coding: UTF-8 -* import json from json import JSONDecodeError import", "isinstance(request_param, list): for index, a in enumerate(request_param): if 
isinstance(a, str): b = request_param.copy()", "json_str = json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return json_str def json_to_xml(json_str): \"\"\" :param json_str: :return:", "import json from json import JSONDecodeError import xmltodict from common.common_response_code import response_code def", "v in c.items(): if v == None: del request_param[k] if isinstance(v, list): b", "common.common_response_code import response_code def xml_to_json(xml_str): \"\"\" :param xml_str: :return: \"\"\" xml_parse = xmltodict.parse(xml_str)", "request_param.copy() for k, v in c.items(): if v == None: del request_param[k] if", "\"\"\" :param request_param: :return: \"\"\" if isinstance(request_param, list): for index, a in enumerate(request_param):", "a == None: del b[index] else: c = a.copy() for k, v in", "UTF-8 -* import json from json import JSONDecodeError import xmltodict from common.common_response_code import", "None: del v[index] if isinstance(request_param, dict): c = request_param.copy() for k, v in", ":param xml_str: :return: \"\"\" xml_parse = xmltodict.parse(xml_str) json_str = json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return", "in c.items(): if v == None: del request_param[k] if isinstance(v, list): b =", "xml_parse = xmltodict.parse(xml_str) json_str = json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return json_str def json_to_xml(json_str): \"\"\"", "str): b = request_param.copy() if a == None: del b[index] else: c =", "json_to_xml(json_str): \"\"\" :param json_str: :return: \"\"\" xml_str = xmltodict.unparse(json_str, pretty=1) return xml_str def", "= xmltodict.unparse(json_str, pretty=1) return xml_str def is_none(request_param): \"\"\" :param request_param: :return: \"\"\" if", "isinstance(a, str): b = request_param.copy() if a == None: del b[index] else: c", "if isinstance(request_param, dict): c = request_param.copy() for k, v in c.items(): if v", "v.copy() for index, a in enumerate(b): if a == None: del 
v[index] return", "return xml_str def is_none(request_param): \"\"\" :param request_param: :return: \"\"\" if isinstance(request_param, list): for", "-*- coding: UTF-8 -* import json from json import JSONDecodeError import xmltodict from", "json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return json_str def json_to_xml(json_str): \"\"\" :param json_str: :return: \"\"\" xml_str", "xmltodict.unparse(json_str, pretty=1) return xml_str def is_none(request_param): \"\"\" :param request_param: :return: \"\"\" if isinstance(request_param,", "for k, v in c.items(): if v == None: del a[k] if isinstance(v,", "v[index] if isinstance(request_param, dict): c = request_param.copy() for k, v in c.items(): if", "= xmltodict.parse(xml_str) json_str = json.dumps(xml_parse, indent=1).replace('\\\\', '\\\\\\\\') return json_str def json_to_xml(json_str): \"\"\" :param", "request_param.copy() if a == None: del b[index] else: c = a.copy() for k,", "# -*- coding: UTF-8 -* import json from json import JSONDecodeError import xmltodict", "v.copy() for index, a in enumerate(b): if a == None: del v[index] if", "def xml_to_json(xml_str): \"\"\" :param xml_str: :return: \"\"\" xml_parse = xmltodict.parse(xml_str) json_str = json.dumps(xml_parse,", "del v[index] if isinstance(request_param, dict): c = request_param.copy() for k, v in c.items():" ]
[ "from cms.plugin_pool import plugin_pool from django.utils.translation import ugettext as _ from django.conf import", "instance.url: link = _(instance.url) elif instance.page_link: link = instance.page_link.get_absolute_url() else: link = \"\"", "name = _(\"Button\") text_enabled = True render_template = \"plugins/bootstrap_button.html\" def render(self, context, instance,", "% _(instance.mailto) elif instance.url: link = _(instance.url) elif instance.page_link: link = instance.page_link.get_absolute_url() else:", "instance.button_size, 'type': instance.button_type, 'label': instance.label, 'new_window': instance.new_window, }) return context def icon_src(self, instance):", "def render(self, context, instance, placeholder): if instance.mailto: link = u\"mailto:%s\" % _(instance.mailto) elif", "settings from models import BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin name = _(\"Button\")", "= BootstrapButtonPlugin name = _(\"Button\") text_enabled = True render_template = \"plugins/bootstrap_button.html\" def render(self,", "BootstrapButtonPlugin name = _(\"Button\") text_enabled = True render_template = \"plugins/bootstrap_button.html\" def render(self, context,", "text_enabled = True render_template = \"plugins/bootstrap_button.html\" def render(self, context, instance, placeholder): if instance.mailto:", "context, instance, placeholder): if instance.mailto: link = u\"mailto:%s\" % _(instance.mailto) elif instance.url: link", "if instance.mailto: link = u\"mailto:%s\" % _(instance.mailto) elif instance.url: link = _(instance.url) elif", "instance.page_link.get_absolute_url() else: link = \"\" context.update({ 'link': link, 'size': instance.button_size, 'type': instance.button_type, 'label':", "as _ from django.conf import settings from models import BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase): model", "_ from django.conf import settings from models import BootstrapButtonPlugin class 
BootstrapButtonPlugin(CMSPluginBase): model =", "link, 'size': instance.button_size, 'type': instance.button_type, 'label': instance.label, 'new_window': instance.new_window, }) return context def", "import plugin_pool from django.utils.translation import ugettext as _ from django.conf import settings from", "placeholder): if instance.mailto: link = u\"mailto:%s\" % _(instance.mailto) elif instance.url: link = _(instance.url)", "from models import BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin name = _(\"Button\") text_enabled", "\"plugins/bootstrap_button.html\" def render(self, context, instance, placeholder): if instance.mailto: link = u\"mailto:%s\" % _(instance.mailto)", "CMSPluginBase from cms.plugin_pool import plugin_pool from django.utils.translation import ugettext as _ from django.conf", "instance.label, 'new_window': instance.new_window, }) return context def icon_src(self, instance): return settings.STATIC_URL + u\"cms/images/plugins/link.png\"", "'new_window': instance.new_window, }) return context def icon_src(self, instance): return settings.STATIC_URL + u\"cms/images/plugins/link.png\" plugin_pool.register_plugin(BootstrapButtonPlugin)", "cms.plugin_pool import plugin_pool from django.utils.translation import ugettext as _ from django.conf import settings", "import ugettext as _ from django.conf import settings from models import BootstrapButtonPlugin class", "= \"\" context.update({ 'link': link, 'size': instance.button_size, 'type': instance.button_type, 'label': instance.label, 'new_window': instance.new_window,", "from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from django.utils.translation import ugettext as", "u\"mailto:%s\" % _(instance.mailto) elif instance.url: link = _(instance.url) elif instance.page_link: link = instance.page_link.get_absolute_url()", "class BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin name = _(\"Button\") 
text_enabled = True render_template =", "link = \"\" context.update({ 'link': link, 'size': instance.button_size, 'type': instance.button_type, 'label': instance.label, 'new_window':", "_(instance.mailto) elif instance.url: link = _(instance.url) elif instance.page_link: link = instance.page_link.get_absolute_url() else: link", "from django.utils.translation import ugettext as _ from django.conf import settings from models import", "import CMSPluginBase from cms.plugin_pool import plugin_pool from django.utils.translation import ugettext as _ from", "cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from django.utils.translation import ugettext as _", "django.conf import settings from models import BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin name", "ugettext as _ from django.conf import settings from models import BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase):", "BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin name = _(\"Button\") text_enabled = True render_template", "= _(\"Button\") text_enabled = True render_template = \"plugins/bootstrap_button.html\" def render(self, context, instance, placeholder):", "BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin name = _(\"Button\") text_enabled = True render_template = \"plugins/bootstrap_button.html\"", "_(\"Button\") text_enabled = True render_template = \"plugins/bootstrap_button.html\" def render(self, context, instance, placeholder): if", "= True render_template = \"plugins/bootstrap_button.html\" def render(self, context, instance, placeholder): if instance.mailto: link", "= \"plugins/bootstrap_button.html\" def render(self, context, instance, placeholder): if instance.mailto: link = u\"mailto:%s\" %", "render(self, context, instance, placeholder): if instance.mailto: link = u\"mailto:%s\" % _(instance.mailto) elif instance.url:", "True render_template 
= \"plugins/bootstrap_button.html\" def render(self, context, instance, placeholder): if instance.mailto: link =", "instance.mailto: link = u\"mailto:%s\" % _(instance.mailto) elif instance.url: link = _(instance.url) elif instance.page_link:", "link = u\"mailto:%s\" % _(instance.mailto) elif instance.url: link = _(instance.url) elif instance.page_link: link", "import BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin name = _(\"Button\") text_enabled = True", "instance, placeholder): if instance.mailto: link = u\"mailto:%s\" % _(instance.mailto) elif instance.url: link =", "from django.conf import settings from models import BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin", "elif instance.url: link = _(instance.url) elif instance.page_link: link = instance.page_link.get_absolute_url() else: link =", "= _(instance.url) elif instance.page_link: link = instance.page_link.get_absolute_url() else: link = \"\" context.update({ 'link':", "context.update({ 'link': link, 'size': instance.button_size, 'type': instance.button_type, 'label': instance.label, 'new_window': instance.new_window, }) return", "'size': instance.button_size, 'type': instance.button_type, 'label': instance.label, 'new_window': instance.new_window, }) return context def icon_src(self,", "model = BootstrapButtonPlugin name = _(\"Button\") text_enabled = True render_template = \"plugins/bootstrap_button.html\" def", "models import BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin name = _(\"Button\") text_enabled =", "_(instance.url) elif instance.page_link: link = instance.page_link.get_absolute_url() else: link = \"\" context.update({ 'link': link,", "= u\"mailto:%s\" % _(instance.mailto) elif instance.url: link = _(instance.url) elif instance.page_link: link =", "elif instance.page_link: link = instance.page_link.get_absolute_url() else: link = \"\" 
context.update({ 'link': link, 'size':", "else: link = \"\" context.update({ 'link': link, 'size': instance.button_size, 'type': instance.button_type, 'label': instance.label,", "django.utils.translation import ugettext as _ from django.conf import settings from models import BootstrapButtonPlugin", "'label': instance.label, 'new_window': instance.new_window, }) return context def icon_src(self, instance): return settings.STATIC_URL +", "plugin_pool from django.utils.translation import ugettext as _ from django.conf import settings from models", "= instance.page_link.get_absolute_url() else: link = \"\" context.update({ 'link': link, 'size': instance.button_size, 'type': instance.button_type,", "instance.page_link: link = instance.page_link.get_absolute_url() else: link = \"\" context.update({ 'link': link, 'size': instance.button_size,", "import settings from models import BootstrapButtonPlugin class BootstrapButtonPlugin(CMSPluginBase): model = BootstrapButtonPlugin name =", "instance.button_type, 'label': instance.label, 'new_window': instance.new_window, }) return context def icon_src(self, instance): return settings.STATIC_URL", "'type': instance.button_type, 'label': instance.label, 'new_window': instance.new_window, }) return context def icon_src(self, instance): return", "link = instance.page_link.get_absolute_url() else: link = \"\" context.update({ 'link': link, 'size': instance.button_size, 'type':", "link = _(instance.url) elif instance.page_link: link = instance.page_link.get_absolute_url() else: link = \"\" context.update({", "'link': link, 'size': instance.button_size, 'type': instance.button_type, 'label': instance.label, 'new_window': instance.new_window, }) return context", "\"\" context.update({ 'link': link, 'size': instance.button_size, 'type': instance.button_type, 'label': instance.label, 'new_window': instance.new_window, })", "render_template = \"plugins/bootstrap_button.html\" def render(self, context, instance, placeholder): if 
instance.mailto: link = u\"mailto:%s\"" ]
[ "Model Description \"\"\" name = models.CharField(max_length=50) class Meta: pass class Skill(models.Model): \"\"\" Description:", "models # Create your models here. class Category(models.Model): \"\"\" Description: Model Description \"\"\"", "models.CharField(max_length=50) class Meta: pass class Skill(models.Model): \"\"\" Description: Model Description \"\"\" name =", "Description \"\"\" name = models.CharField(max_length=50) class Meta: pass class Skill(models.Model): \"\"\" Description: Model", "Skill(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) category = models.ForeignKey('Category', on_delete=models.CASCADE)", "\"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) category = models.ForeignKey('Category', on_delete=models.CASCADE) class", "Description: Model Description \"\"\" name = models.CharField(max_length=50) class Meta: pass class Skill(models.Model): \"\"\"", "class Category(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) class Meta: pass", "\"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) class Meta: pass class Skill(models.Model):", "class Skill(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) category = models.ForeignKey('Category',", "import models # Create your models here. class Category(models.Model): \"\"\" Description: Model Description", "\"\"\" name = models.CharField(max_length=50) class Meta: pass class Skill(models.Model): \"\"\" Description: Model Description", "your models here. class Category(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50)", "name = models.CharField(max_length=50) class Meta: pass class Skill(models.Model): \"\"\" Description: Model Description \"\"\"", "django.db import models # Create your models here. 
class Category(models.Model): \"\"\" Description: Model", "models here. class Category(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) class", "from django.db import models # Create your models here. class Category(models.Model): \"\"\" Description:", "pass class Skill(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) category =", "# Create your models here. class Category(models.Model): \"\"\" Description: Model Description \"\"\" name", "Description: Model Description \"\"\" name = models.CharField(max_length=50) category = models.ForeignKey('Category', on_delete=models.CASCADE) class Meta:", "Model Description \"\"\" name = models.CharField(max_length=50) category = models.ForeignKey('Category', on_delete=models.CASCADE) class Meta: pass", "Create your models here. class Category(models.Model): \"\"\" Description: Model Description \"\"\" name =", "= models.CharField(max_length=50) class Meta: pass class Skill(models.Model): \"\"\" Description: Model Description \"\"\" name", "class Meta: pass class Skill(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50)", "Category(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) class Meta: pass class", "here. class Category(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) class Meta:", "Meta: pass class Skill(models.Model): \"\"\" Description: Model Description \"\"\" name = models.CharField(max_length=50) category" ]
[ "\"..\") import smart_utils import os import glob def test_draw(): test_files = glob.glob(\"Data/*\") for", "sys.path.insert(0, \"..\") import smart_utils import os import glob def test_draw(): test_files = glob.glob(\"Data/*\")", "sys sys.path.insert(0, \"..\") import smart_utils import os import glob def test_draw(): test_files =", "<reponame>mwang87/SMART_NMR import sys sys.path.insert(0, \"..\") import smart_utils import os import glob def test_draw():", "glob def test_draw(): test_files = glob.glob(\"Data/*\") for test_file in test_files: print(test_file) smart_utils.draw_nmr(test_file, \"{}.png\".format(os.path.basename(test_file)))", "import os import glob def test_draw(): test_files = glob.glob(\"Data/*\") for test_file in test_files:", "os import glob def test_draw(): test_files = glob.glob(\"Data/*\") for test_file in test_files: print(test_file)", "import sys sys.path.insert(0, \"..\") import smart_utils import os import glob def test_draw(): test_files", "import glob def test_draw(): test_files = glob.glob(\"Data/*\") for test_file in test_files: print(test_file) smart_utils.draw_nmr(test_file,", "smart_utils import os import glob def test_draw(): test_files = glob.glob(\"Data/*\") for test_file in", "import smart_utils import os import glob def test_draw(): test_files = glob.glob(\"Data/*\") for test_file" ]
[ "= 1.0 WEIGHT_LOWER_LIMIT = -1.0 BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT =", "= 5 NORMAL_NUM_LOWER_LIMIT = 2 MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT =", "1.0 BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0 EVOLUTION_PARAM_LOWER_LIMIT = -1.0 EPSIRON_LOWER_LIMIT = 0.01", "NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT = 2 MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT", "NORMAL_NUM_LOWER_LIMIT = 2 MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT", "= 1 WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT = -1.0 BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT =", "# nn parameters INPUT_NUM = 2 OUTPUT_NUM = 1 NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT", "= 2 NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT =", "= 5 CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT =", "2 NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT = 1.0", "CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT = -1.0 BIAS_UPPER_LIMIT", "parameters INPUT_NUM = 2 OUTPUT_NUM = 1 NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT = 2", "10 CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT = -1.0 BIAS_UPPER_LIMIT = 1.0", "MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT", "NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT", "= 2 MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT =", "WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT = 
-1.0 BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT", "-1.0 BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0 EVOLUTION_PARAM_LOWER_LIMIT = -1.0", "OUTPUT_NUM = 1 NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT = 2 MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT", "= 2 MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT =", "= -1.0 BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0 EVOLUTION_PARAM_LOWER_LIMIT =", "= -1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0 EVOLUTION_PARAM_LOWER_LIMIT = -1.0 EPSIRON_LOWER_LIMIT = 0.01 EPSIRON_UPPER_LIMIT =", "5 CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT = -1.0", "= 10 CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT = -1.0 BIAS_UPPER_LIMIT =", "= 1 NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT = 2 MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT =", "1 WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT = -1.0 BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT = -1.0", "2 MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT = 10", "nn parameters INPUT_NUM = 2 OUTPUT_NUM = 1 NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT =", "= 1.0 BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0 EVOLUTION_PARAM_LOWER_LIMIT = -1.0 EPSIRON_LOWER_LIMIT =", "= 2 OUTPUT_NUM = 1 NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT = 2 MODULATION_NUM_UPPER_LIMIT =", "BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0 EVOLUTION_PARAM_LOWER_LIMIT = -1.0 EPSIRON_LOWER_LIMIT", "WEIGHT_LOWER_LIMIT = -1.0 BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0 EVOLUTION_PARAM_LOWER_LIMIT", "INPUT_NUM = 2 OUTPUT_NUM = 1 NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT = 2 
MODULATION_NUM_UPPER_LIMIT", "1.0 WEIGHT_LOWER_LIMIT = -1.0 BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0", "2 MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT = 1", "1 NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT = 2 MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT = 2", "MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT = 5 CONNECTION_NUM_UPPER_LIMIT = 10 CONNECTION_NUM_LOWER_LIMIT", "-1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0 EVOLUTION_PARAM_LOWER_LIMIT = -1.0 EPSIRON_LOWER_LIMIT = 0.01 EPSIRON_UPPER_LIMIT = 1.0", "2 OUTPUT_NUM = 1 NORMAL_NUM_UPPER_LIMIT = 5 NORMAL_NUM_LOWER_LIMIT = 2 MODULATION_NUM_UPPER_LIMIT = 2", "5 NORMAL_NUM_LOWER_LIMIT = 2 MODULATION_NUM_UPPER_LIMIT = 2 MODULATION_NUM_LOWER_LIMIT = 2 NEURON_NUM_UPPER_LIMIT = 5", "BIAS_LOWER_LIMIT = -1.0 EVOLUTION_PARAM_UPPER_LIMIT = 1.0 EVOLUTION_PARAM_LOWER_LIMIT = -1.0 EPSIRON_LOWER_LIMIT = 0.01 EPSIRON_UPPER_LIMIT", "CONNECTION_NUM_LOWER_LIMIT = 1 WEIGHT_UPPER_LIMIT = 1.0 WEIGHT_LOWER_LIMIT = -1.0 BIAS_UPPER_LIMIT = 1.0 BIAS_LOWER_LIMIT" ]
[ "\"point2\": {\"x\": 932, \"y\": 377}}}, {\"bundesstrasse\": {\"point1\": {\"x\": 1046, \"y\": 132}, \"point2\": {\"x\":", "34, \"y\": 740}, \"point2\": {\"x\": 1433, # \"y\": 103}} \"citylab\": { \"platzderluftbruecke\": {\"point1\":", "\"truck\", \"bicycle\", \"bus\", \"motorbike\"] # CLASSES = [\"car\", \"truck\", \"person\", \"bus\"] # changed", "for both directions going across two lanes CLASSES = [\"car\", \"truck\", \"bicycle\", \"bus\",", "line for both directions going across two lanes CLASSES = [\"car\", \"truck\", \"bicycle\",", "dirname, abspath, join DIR_PATH = dirname(abspath(__file__)) OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS = \"data\"", "{\"point1\": {\"x\": 568, \"y\": 150}, \"point2\": {\"x\": 642, \"y\": 235}}}, # 'citylab': #", "{\"x\": 975, \"y\": 258}}, \"walking_lindner\": {\"point1\": {\"x\": 568, \"y\": 150}, \"point2\": {\"x\": 642,", "702, \"y\": 864}}, # \"cross\": {\"point1\": {\"x\": 515, \"y\": 494}, # \"point2\": {\"x\":", "642, \"y\": 235}}}, # 'citylab': # {\"point1\": {\"x\": 34, \"y\": 740}, \"point2\": {\"x\":", "# \"point2\": {\"x\": 932, \"y\": 377}}}, {\"bundesstrasse\": {\"point1\": {\"x\": 1046, \"y\": 132}, \"point2\":", "{\"x\": 568, \"y\": 150}, \"point2\": {\"x\": 642, \"y\": 235}}}, # 'citylab': # {\"point1\":", "179}}} } # tx2: same line for both directions going across two lanes", "# tx2: same line for both directions going across two lanes CLASSES =", "two lanes CLASSES = [\"car\", \"truck\", \"bicycle\", \"bus\", \"motorbike\"] # CLASSES = [\"car\",", "515, \"y\": 494}, # \"point2\": {\"x\": 932, \"y\": 377}}}, {\"bundesstrasse\": {\"point1\": {\"x\": 1046,", "= { # \"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\":", "{\"x\": 393, \"y\": 166}, \"point2\": {\"x\": 718, \"y\": 72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\": 1104,", 
"# 'citylab': # {\"point1\": {\"x\": 34, \"y\": 740}, \"point2\": {\"x\": 1433, # \"y\":", "# \"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\":", "{\"x\": 1046, \"y\": 132}, \"point2\": {\"x\": 1211, \"y\": 226}}, \"lindner\": {\"point1\": {\"x\": 393,", "= \"data\" STATIONS = ['ecdf', 'citylab'] BOARDS = ['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS =", "= ['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS = {'ecdf': # {'ecdf-lindner': {\"point1\": {\"x\": 718, \"y\":", "from os.path import dirname, abspath, join DIR_PATH = dirname(abspath(__file__)) OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..'))", "COUNTER_LINE_NAMES = {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\", \"240885bb-636e-41f2-8448-bfcdbabd42b5\": \"walking_bundesstrasse\", \"25b11f4a-0d23-4878-9050-5b5a06834adc\": \"walking_lindner\" },", "\"y\": 173}, Coords from first run, bad lines # \"point2\": {\"x\": 702, \"y\":", "{\"bundesstrasse\": {\"point1\": {\"x\": 1046, \"y\": 132}, \"point2\": {\"x\": 1211, \"y\": 226}}, \"lindner\": {\"point1\":", "across two lanes CLASSES = [\"car\", \"truck\", \"bicycle\", \"bus\", \"motorbike\"] # CLASSES =", "393, \"y\": 166}, \"point2\": {\"x\": 718, \"y\": 72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\": 1104, \"y\":", "abspath, join DIR_PATH = dirname(abspath(__file__)) OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS = \"data\" STATIONS", "'citylab'] BOARDS = ['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS = {'ecdf': # {'ecdf-lindner': {\"point1\": {\"x\":", "\"data\" STATIONS = ['ecdf', 'citylab'] BOARDS = ['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS = {'ecdf':", "= ['ecdf', 'citylab'] BOARDS = ['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS = {'ecdf': # 
{'ecdf-lindner':", "541, \"y\": 445}, \"point2\": {\"x\": 960, \"y\": 179}}} } # tx2: same line", "72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\": 1104, \"y\": 200}, \"point2\": {\"x\": 975, \"y\": 258}}, \"walking_lindner\":", "258}}, \"walking_lindner\": {\"point1\": {\"x\": 568, \"y\": 150}, \"point2\": {\"x\": 642, \"y\": 235}}}, #", "\"walking_bundesstrasse\": {\"point1\": {\"x\": 1104, \"y\": 200}, \"point2\": {\"x\": 975, \"y\": 258}}, \"walking_lindner\": {\"point1\":", "# \"y\": 103}} \"citylab\": { \"platzderluftbruecke\": {\"point1\": {\"x\": 541, \"y\": 445}, \"point2\": {\"x\":", "['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS = {'ecdf': # {'ecdf-lindner': {\"point1\": {\"x\": 718, \"y\": 173},", "[\"car\", \"truck\", \"person\", \"bus\"] # changed for second ecdf-recording # COUNTER_LINE_NAMES = {", "join DIR_PATH = dirname(abspath(__file__)) OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS = \"data\" STATIONS =", "'..')) PATH_TO_RECORDINGS = \"data\" STATIONS = ['ecdf', 'citylab'] BOARDS = ['nano', 'tx2', 'xavier']", "PATH_TO_RECORDINGS = \"data\" STATIONS = ['ecdf', 'citylab'] BOARDS = ['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS", "# {'ecdf-lindner': {\"point1\": {\"x\": 718, \"y\": 173}, Coords from first run, bad lines", "\"bus\", \"motorbike\"] # CLASSES = [\"car\", \"truck\", \"person\", \"bus\"] # changed for second", "\"y\": 200}, \"point2\": {\"x\": 975, \"y\": 258}}, \"walking_lindner\": {\"point1\": {\"x\": 568, \"y\": 150},", "\"point2\": {\"x\": 975, \"y\": 258}}, \"walking_lindner\": {\"point1\": {\"x\": 568, \"y\": 150}, \"point2\": {\"x\":", "same line for both directions going across two lanes CLASSES = [\"car\", \"truck\",", "1211, \"y\": 226}}, \"lindner\": {\"point1\": {\"x\": 393, \"y\": 166}, \"point2\": {\"x\": 718, \"y\":", "dirname(abspath(__file__)) OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS = \"data\" STATIONS = ['ecdf', 'citylab'] BOARDS", "103}} \"citylab\": { 
\"platzderluftbruecke\": {\"point1\": {\"x\": 541, \"y\": 445}, \"point2\": {\"x\": 960, \"y\":", "CLASSES = [\"car\", \"truck\", \"bicycle\", \"bus\", \"motorbike\"] # CLASSES = [\"car\", \"truck\", \"person\",", "OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS = \"data\" STATIONS = ['ecdf', 'citylab'] BOARDS =", "second ecdf-recording # COUNTER_LINE_NAMES = { # \"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES", "\"walking_lindner\": {\"point1\": {\"x\": 568, \"y\": 150}, \"point2\": {\"x\": 642, \"y\": 235}}}, # 'citylab':", "\"y\": 103}} \"citylab\": { \"platzderluftbruecke\": {\"point1\": {\"x\": 541, \"y\": 445}, \"point2\": {\"x\": 960,", "going across two lanes CLASSES = [\"car\", \"truck\", \"bicycle\", \"bus\", \"motorbike\"] # CLASSES", "= {'ecdf': # {'ecdf-lindner': {\"point1\": {\"x\": 718, \"y\": 173}, Coords from first run,", "{'ecdf-lindner': {\"point1\": {\"x\": 718, \"y\": 173}, Coords from first run, bad lines #", "abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS = \"data\" STATIONS = ['ecdf', 'citylab'] BOARDS = ['nano', 'tx2',", "lanes CLASSES = [\"car\", \"truck\", \"bicycle\", \"bus\", \"motorbike\"] # CLASSES = [\"car\", \"truck\",", "\"citylab\": { \"platzderluftbruecke\": {\"point1\": {\"x\": 541, \"y\": 445}, \"point2\": {\"x\": 960, \"y\": 179}}}", "568, \"y\": 150}, \"point2\": {\"x\": 642, \"y\": 235}}}, # 'citylab': # {\"point1\": {\"x\":", "932, \"y\": 377}}}, {\"bundesstrasse\": {\"point1\": {\"x\": 1046, \"y\": 132}, \"point2\": {\"x\": 1211, \"y\":", "# \"point2\": {\"x\": 702, \"y\": 864}}, # \"cross\": {\"point1\": {\"x\": 515, \"y\": 494},", "\"y\": 235}}}, # 'citylab': # {\"point1\": {\"x\": 34, \"y\": 740}, \"point2\": {\"x\": 1433,", "960, \"y\": 179}}} } # tx2: same line for both directions going across", "\"point2\": {\"x\": 1433, # \"y\": 103}} \"citylab\": { \"platzderluftbruecke\": {\"point1\": {\"x\": 541, 
\"y\":", "1046, \"y\": 132}, \"point2\": {\"x\": 1211, \"y\": 226}}, \"lindner\": {\"point1\": {\"x\": 393, \"y\":", "DIR_PATH = dirname(abspath(__file__)) OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS = \"data\" STATIONS = ['ecdf',", "\"point2\": {\"x\": 642, \"y\": 235}}}, # 'citylab': # {\"point1\": {\"x\": 34, \"y\": 740},", "\"y\": 740}, \"point2\": {\"x\": 1433, # \"y\": 103}} \"citylab\": { \"platzderluftbruecke\": {\"point1\": {\"x\":", "= abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS = \"data\" STATIONS = ['ecdf', 'citylab'] BOARDS = ['nano',", "from first run, bad lines # \"point2\": {\"x\": 702, \"y\": 864}}, # \"cross\":", "Coords from first run, bad lines # \"point2\": {\"x\": 702, \"y\": 864}}, #", "= dirname(abspath(__file__)) OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS = \"data\" STATIONS = ['ecdf', 'citylab']", "{\"x\": 1433, # \"y\": 103}} \"citylab\": { \"platzderluftbruecke\": {\"point1\": {\"x\": 541, \"y\": 445},", "\"bus\"] # changed for second ecdf-recording # COUNTER_LINE_NAMES = { # \"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\":", "# \"cross\": {\"point1\": {\"x\": 515, \"y\": 494}, # \"point2\": {\"x\": 932, \"y\": 377}}},", "\"y\": 258}}, \"walking_lindner\": {\"point1\": {\"x\": 568, \"y\": 150}, \"point2\": {\"x\": 642, \"y\": 235}}},", "\"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\", \"240885bb-636e-41f2-8448-bfcdbabd42b5\": \"walking_bundesstrasse\", \"25b11f4a-0d23-4878-9050-5b5a06834adc\":", "{ \"platzderluftbruecke\": {\"point1\": {\"x\": 541, \"y\": 445}, \"point2\": {\"x\": 960, \"y\": 179}}} }", "{\"point1\": {\"x\": 1046, \"y\": 132}, \"point2\": {\"x\": 1211, \"y\": 226}}, \"lindner\": {\"point1\": {\"x\":", "= [\"car\", \"truck\", \"bicycle\", \"bus\", \"motorbike\"] # CLASSES = [\"car\", \"truck\", \"person\", 
\"bus\"]", "\"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\", \"240885bb-636e-41f2-8448-bfcdbabd42b5\": \"walking_bundesstrasse\", \"25b11f4a-0d23-4878-9050-5b5a06834adc\": \"walking_lindner\"", "'tx2', 'xavier'] COUNTER_LINE_COORDS = {'ecdf': # {'ecdf-lindner': {\"point1\": {\"x\": 718, \"y\": 173}, Coords", "run, bad lines # \"point2\": {\"x\": 702, \"y\": 864}}, # \"cross\": {\"point1\": {\"x\":", "{\"point1\": {\"x\": 1104, \"y\": 200}, \"point2\": {\"x\": 975, \"y\": 258}}, \"walking_lindner\": {\"point1\": {\"x\":", "# COUNTER_LINE_NAMES = { # \"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\":", "\"point2\": {\"x\": 702, \"y\": 864}}, # \"cross\": {\"point1\": {\"x\": 515, \"y\": 494}, #", "\"point2\": {\"x\": 960, \"y\": 179}}} } # tx2: same line for both directions", "\"lindner\": {\"point1\": {\"x\": 393, \"y\": 166}, \"point2\": {\"x\": 718, \"y\": 72}}, \"walking_bundesstrasse\": {\"point1\":", "= {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\", \"240885bb-636e-41f2-8448-bfcdbabd42b5\": \"walking_bundesstrasse\", \"25b11f4a-0d23-4878-9050-5b5a06834adc\": \"walking_lindner\" }, \"citylab\":", "\"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\",", "import dirname, abspath, join DIR_PATH = dirname(abspath(__file__)) OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS =", "lines # \"point2\": {\"x\": 702, \"y\": 864}}, # \"cross\": {\"point1\": {\"x\": 515, \"y\":", "{\"x\": 515, \"y\": 494}, # \"point2\": {\"x\": 932, \"y\": 
377}}}, {\"bundesstrasse\": {\"point1\": {\"x\":", "\"y\": 445}, \"point2\": {\"x\": 960, \"y\": 179}}} } # tx2: same line for", "{ # \"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\",", "{\"x\": 702, \"y\": 864}}, # \"cross\": {\"point1\": {\"x\": 515, \"y\": 494}, # \"point2\":", "494}, # \"point2\": {\"x\": 932, \"y\": 377}}}, {\"bundesstrasse\": {\"point1\": {\"x\": 1046, \"y\": 132},", "\"y\": 150}, \"point2\": {\"x\": 642, \"y\": 235}}}, # 'citylab': # {\"point1\": {\"x\": 34,", "'citylab': # {\"point1\": {\"x\": 34, \"y\": 740}, \"point2\": {\"x\": 1433, # \"y\": 103}}", "os.path import dirname, abspath, join DIR_PATH = dirname(abspath(__file__)) OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..')) PATH_TO_RECORDINGS", "200}, \"point2\": {\"x\": 975, \"y\": 258}}, \"walking_lindner\": {\"point1\": {\"x\": 568, \"y\": 150}, \"point2\":", "{\"point1\": {\"x\": 541, \"y\": 445}, \"point2\": {\"x\": 960, \"y\": 179}}} } # tx2:", "740}, \"point2\": {\"x\": 1433, # \"y\": 103}} \"citylab\": { \"platzderluftbruecke\": {\"point1\": {\"x\": 541,", "# {\"point1\": {\"x\": 34, \"y\": 740}, \"point2\": {\"x\": 1433, # \"y\": 103}} \"citylab\":", "{ \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\", \"240885bb-636e-41f2-8448-bfcdbabd42b5\": \"walking_bundesstrasse\", \"25b11f4a-0d23-4878-9050-5b5a06834adc\": \"walking_lindner\" }, \"citylab\": {\"a7317e7a-85da-4f08-8efc-4e90a2a2b2b8\": \"platzderluftbruecke\"}", "\"point2\": {\"x\": 718, \"y\": 72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\": 1104, \"y\": 200}, \"point2\": {\"x\":", "{\"x\": 960, \"y\": 179}}} } # tx2: same line for both directions going", "{'ecdf': # {'ecdf-lindner': {\"point1\": {\"x\": 718, \"y\": 173}, Coords from first run, bad", "COUNTER_LINE_NAMES = { # \"ecdf\": 
{\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\": {", "\"y\": 179}}} } # tx2: same line for both directions going across two", "\"y\": 864}}, # \"cross\": {\"point1\": {\"x\": 515, \"y\": 494}, # \"point2\": {\"x\": 932,", "{\"x\": 1104, \"y\": 200}, \"point2\": {\"x\": 975, \"y\": 258}}, \"walking_lindner\": {\"point1\": {\"x\": 568,", "['ecdf', 'citylab'] BOARDS = ['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS = {'ecdf': # {'ecdf-lindner': {\"point1\":", "\"y\": 72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\": 1104, \"y\": 200}, \"point2\": {\"x\": 975, \"y\": 258}},", "1433, # \"y\": 103}} \"citylab\": { \"platzderluftbruecke\": {\"point1\": {\"x\": 541, \"y\": 445}, \"point2\":", "STATIONS = ['ecdf', 'citylab'] BOARDS = ['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS = {'ecdf': #", "{\"x\": 1211, \"y\": 226}}, \"lindner\": {\"point1\": {\"x\": 393, \"y\": 166}, \"point2\": {\"x\": 718,", "235}}}, # 'citylab': # {\"point1\": {\"x\": 34, \"y\": 740}, \"point2\": {\"x\": 1433, #", "\"person\", \"bus\"] # changed for second ecdf-recording # COUNTER_LINE_NAMES = { # \"ecdf\":", "\"y\": 166}, \"point2\": {\"x\": 718, \"y\": 72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\": 1104, \"y\": 200},", "{\"x\": 718, \"y\": 173}, Coords from first run, bad lines # \"point2\": {\"x\":", "975, \"y\": 258}}, \"walking_lindner\": {\"point1\": {\"x\": 568, \"y\": 150}, \"point2\": {\"x\": 642, \"y\":", "\"y\": 377}}}, {\"bundesstrasse\": {\"point1\": {\"x\": 1046, \"y\": 132}, \"point2\": {\"x\": 1211, \"y\": 226}},", "718, \"y\": 173}, Coords from first run, bad lines # \"point2\": {\"x\": 702,", "for second ecdf-recording # COUNTER_LINE_NAMES = { # \"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}}", "# changed for second ecdf-recording # COUNTER_LINE_NAMES = { # \"ecdf\": 
{\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\",", "directions going across two lanes CLASSES = [\"car\", \"truck\", \"bicycle\", \"bus\", \"motorbike\"] #", "{\"point1\": {\"x\": 515, \"y\": 494}, # \"point2\": {\"x\": 932, \"y\": 377}}}, {\"bundesstrasse\": {\"point1\":", "{\"point1\": {\"x\": 34, \"y\": 740}, \"point2\": {\"x\": 1433, # \"y\": 103}} \"citylab\": {", "\"y\": 494}, # \"point2\": {\"x\": 932, \"y\": 377}}}, {\"bundesstrasse\": {\"point1\": {\"x\": 1046, \"y\":", "\"motorbike\"] # CLASSES = [\"car\", \"truck\", \"person\", \"bus\"] # changed for second ecdf-recording", "\"truck\", \"person\", \"bus\"] # changed for second ecdf-recording # COUNTER_LINE_NAMES = { #", "} # tx2: same line for both directions going across two lanes CLASSES", "'xavier'] COUNTER_LINE_COORDS = {'ecdf': # {'ecdf-lindner': {\"point1\": {\"x\": 718, \"y\": 173}, Coords from", "bad lines # \"point2\": {\"x\": 702, \"y\": 864}}, # \"cross\": {\"point1\": {\"x\": 515,", "{\"point1\": {\"x\": 718, \"y\": 173}, Coords from first run, bad lines # \"point2\":", "173}, Coords from first run, bad lines # \"point2\": {\"x\": 702, \"y\": 864}},", "\"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\", \"240885bb-636e-41f2-8448-bfcdbabd42b5\": \"walking_bundesstrasse\", \"25b11f4a-0d23-4878-9050-5b5a06834adc\": \"walking_lindner\" }, \"citylab\": {\"a7317e7a-85da-4f08-8efc-4e90a2a2b2b8\": \"platzderluftbruecke\"} }", "\"y\": 226}}, \"lindner\": {\"point1\": {\"x\": 393, \"y\": 166}, \"point2\": {\"x\": 718, \"y\": 72}},", "{\"point1\": {\"x\": 393, \"y\": 166}, \"point2\": {\"x\": 718, \"y\": 72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\":", "{\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\", \"240885bb-636e-41f2-8448-bfcdbabd42b5\": \"walking_bundesstrasse\", \"25b11f4a-0d23-4878-9050-5b5a06834adc\": \"walking_lindner\" }, 
\"citylab\": {\"a7317e7a-85da-4f08-8efc-4e90a2a2b2b8\":", "\"y\": 132}, \"point2\": {\"x\": 1211, \"y\": 226}}, \"lindner\": {\"point1\": {\"x\": 393, \"y\": 166},", "{\"x\": 642, \"y\": 235}}}, # 'citylab': # {\"point1\": {\"x\": 34, \"y\": 740}, \"point2\":", "1104, \"y\": 200}, \"point2\": {\"x\": 975, \"y\": 258}}, \"walking_lindner\": {\"point1\": {\"x\": 568, \"y\":", "BOARDS = ['nano', 'tx2', 'xavier'] COUNTER_LINE_COORDS = {'ecdf': # {'ecdf-lindner': {\"point1\": {\"x\": 718,", "718, \"y\": 72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\": 1104, \"y\": 200}, \"point2\": {\"x\": 975, \"y\":", "\"platzderluftbruecke\": {\"point1\": {\"x\": 541, \"y\": 445}, \"point2\": {\"x\": 960, \"y\": 179}}} } #", "132}, \"point2\": {\"x\": 1211, \"y\": 226}}, \"lindner\": {\"point1\": {\"x\": 393, \"y\": 166}, \"point2\":", "\"bicycle\", \"bus\", \"motorbike\"] # CLASSES = [\"car\", \"truck\", \"person\", \"bus\"] # changed for", "first run, bad lines # \"point2\": {\"x\": 702, \"y\": 864}}, # \"cross\": {\"point1\":", "864}}, # \"cross\": {\"point1\": {\"x\": 515, \"y\": 494}, # \"point2\": {\"x\": 932, \"y\":", "both directions going across two lanes CLASSES = [\"car\", \"truck\", \"bicycle\", \"bus\", \"motorbike\"]", "\"point2\": {\"x\": 1211, \"y\": 226}}, \"lindner\": {\"point1\": {\"x\": 393, \"y\": 166}, \"point2\": {\"x\":", "377}}}, {\"bundesstrasse\": {\"point1\": {\"x\": 1046, \"y\": 132}, \"point2\": {\"x\": 1211, \"y\": 226}}, \"lindner\":", "# CLASSES = [\"car\", \"truck\", \"person\", \"bus\"] # changed for second ecdf-recording #", "= [\"car\", \"truck\", \"person\", \"bus\"] # changed for second ecdf-recording # COUNTER_LINE_NAMES =", "\"cross\": {\"point1\": {\"x\": 515, \"y\": 494}, # \"point2\": {\"x\": 932, \"y\": 377}}}, {\"bundesstrasse\":", "166}, \"point2\": {\"x\": 718, \"y\": 72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\": 1104, \"y\": 200}, \"point2\":", "CLASSES = [\"car\", \"truck\", \"person\", \"bus\"] # changed for second 
ecdf-recording # COUNTER_LINE_NAMES", "changed for second ecdf-recording # COUNTER_LINE_NAMES = { # \"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\":", "{\"x\": 932, \"y\": 377}}}, {\"bundesstrasse\": {\"point1\": {\"x\": 1046, \"y\": 132}, \"point2\": {\"x\": 1211,", "\"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\", \"240885bb-636e-41f2-8448-bfcdbabd42b5\": \"walking_bundesstrasse\",", "COUNTER_LINE_COORDS = {'ecdf': # {'ecdf-lindner': {\"point1\": {\"x\": 718, \"y\": 173}, Coords from first", "ecdf-recording # COUNTER_LINE_NAMES = { # \"ecdf\": {\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES =", "{\"x\": 34, \"y\": 740}, \"point2\": {\"x\": 1433, # \"y\": 103}} \"citylab\": { \"platzderluftbruecke\":", "{\"a4ad8491-c790-4078-9092-94ac1e3e0b46\": \"ecdf-lindner\", \"882e3178-408a-4e3e-884f-d8d2290b47f0\": \"cross\"}} COUNTER_LINE_NAMES = {\"ecdf\": { \"c9f71c06-6baf-47c3-9ca2-4c26676b7336\": \"bundesstrasse\", \"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc\": \"lindner\", \"240885bb-636e-41f2-8448-bfcdbabd42b5\":", "{\"x\": 541, \"y\": 445}, \"point2\": {\"x\": 960, \"y\": 179}}} } # tx2: same", "150}, \"point2\": {\"x\": 642, \"y\": 235}}}, # 'citylab': # {\"point1\": {\"x\": 34, \"y\":", "[\"car\", \"truck\", \"bicycle\", \"bus\", \"motorbike\"] # CLASSES = [\"car\", \"truck\", \"person\", \"bus\"] #", "445}, \"point2\": {\"x\": 960, \"y\": 179}}} } # tx2: same line for both", "{\"x\": 718, \"y\": 72}}, \"walking_bundesstrasse\": {\"point1\": {\"x\": 1104, \"y\": 200}, \"point2\": {\"x\": 975,", "tx2: same line for both directions going across two lanes CLASSES = [\"car\",", "226}}, \"lindner\": {\"point1\": {\"x\": 393, \"y\": 166}, \"point2\": {\"x\": 
718, \"y\": 72}}, \"walking_bundesstrasse\":" ]
[ "not exist throws an exception API.user.delete('my_user') try: API.user.get('NOT_FOUND') except management.ApiError as why: if", "API.user.delete('my_user') try: API.user.get('NOT_FOUND') except management.ApiError as why: if why.error_code == 404: print('User not", "exception API.user.delete('my_user') try: API.user.get('NOT_FOUND') except management.ApiError as why: if why.error_code == 404: print('User", "verification for testing by passing in verify=False. API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True)", "management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True) API.user.create('my_user', 'password') # Get a user print(API.user.get('my_user')) # User", "can disable certificate verification for testing by passing in verify=False. API = management.ManagementApi('https://rmq.amqpstorm.io:15671',", "CA bundle. # You can disable certificate verification for testing by passing in", "import management if __name__ == '__main__': # If using a self-signed certificate, change", "certificate verification for testing by passing in verify=False. API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest',", "# User that does not exist throws an exception API.user.delete('my_user') try: API.user.get('NOT_FOUND') except", "an exception API.user.delete('my_user') try: API.user.get('NOT_FOUND') except management.ApiError as why: if why.error_code == 404:", "to point at your CA bundle. # You can disable certificate verification for", "in verify=False. 
API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True) API.user.create('my_user', 'password') # Get a", "from amqpstorm import management if __name__ == '__main__': # If using a self-signed", "user print(API.user.get('my_user')) # User that does not exist throws an exception API.user.delete('my_user') try:", "exist throws an exception API.user.delete('my_user') try: API.user.get('NOT_FOUND') except management.ApiError as why: if why.error_code", "try: API.user.get('NOT_FOUND') except management.ApiError as why: if why.error_code == 404: print('User not found')", "# If using a self-signed certificate, change verify=True to point at your CA", "certificate, change verify=True to point at your CA bundle. # You can disable", "that does not exist throws an exception API.user.delete('my_user') try: API.user.get('NOT_FOUND') except management.ApiError as", "management if __name__ == '__main__': # If using a self-signed certificate, change verify=True", "== '__main__': # If using a self-signed certificate, change verify=True to point at", "disable certificate verification for testing by passing in verify=False. API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',", "a self-signed certificate, change verify=True to point at your CA bundle. # You", "Get a user print(API.user.get('my_user')) # User that does not exist throws an exception", "verify=False. API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True) API.user.create('my_user', 'password') # Get a user", "passing in verify=False. 
API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True) API.user.create('my_user', 'password') # Get", "'guest', 'guest', verify=True) API.user.create('my_user', 'password') # Get a user print(API.user.get('my_user')) # User that", "does not exist throws an exception API.user.delete('my_user') try: API.user.get('NOT_FOUND') except management.ApiError as why:", "testing by passing in verify=False. API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True) API.user.create('my_user', 'password')", "by passing in verify=False. API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True) API.user.create('my_user', 'password') #", "API.user.create('my_user', 'password') # Get a user print(API.user.get('my_user')) # User that does not exist", "verify=True) API.user.create('my_user', 'password') # Get a user print(API.user.get('my_user')) # User that does not", "your CA bundle. # You can disable certificate verification for testing by passing", "bundle. # You can disable certificate verification for testing by passing in verify=False.", "at your CA bundle. # You can disable certificate verification for testing by", "amqpstorm import management if __name__ == '__main__': # If using a self-signed certificate,", "'password') # Get a user print(API.user.get('my_user')) # User that does not exist throws", "# Get a user print(API.user.get('my_user')) # User that does not exist throws an", "'__main__': # If using a self-signed certificate, change verify=True to point at your", "if __name__ == '__main__': # If using a self-signed certificate, change verify=True to", "print(API.user.get('my_user')) # User that does not exist throws an exception API.user.delete('my_user') try: API.user.get('NOT_FOUND')", "point at your CA bundle. 
# You can disable certificate verification for testing", "a user print(API.user.get('my_user')) # User that does not exist throws an exception API.user.delete('my_user')", "__name__ == '__main__': # If using a self-signed certificate, change verify=True to point", "verify=True to point at your CA bundle. # You can disable certificate verification", "self-signed certificate, change verify=True to point at your CA bundle. # You can", "'guest', verify=True) API.user.create('my_user', 'password') # Get a user print(API.user.get('my_user')) # User that does", "User that does not exist throws an exception API.user.delete('my_user') try: API.user.get('NOT_FOUND') except management.ApiError", "throws an exception API.user.delete('my_user') try: API.user.get('NOT_FOUND') except management.ApiError as why: if why.error_code ==", "API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True) API.user.create('my_user', 'password') # Get a user print(API.user.get('my_user'))", "If using a self-signed certificate, change verify=True to point at your CA bundle.", "You can disable certificate verification for testing by passing in verify=False. API =", "<gh_stars>100-1000 from amqpstorm import management if __name__ == '__main__': # If using a", "for testing by passing in verify=False. API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True) API.user.create('my_user',", "# You can disable certificate verification for testing by passing in verify=False. API", "change verify=True to point at your CA bundle. # You can disable certificate", "= management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest', 'guest', verify=True) API.user.create('my_user', 'password') # Get a user print(API.user.get('my_user')) #", "using a self-signed certificate, change verify=True to point at your CA bundle. #" ]
[ "f: long_description = f.read() setup( name = \"makeReact\", packages = [\"makeReact\"], entry_points =", "react-native developer to speed-up their develoment process.\", long_description=long_description, long_description_content_type=\"text/markdown\", author = \"<NAME>\", author_email", "\"makeReact\", packages = [\"makeReact\"], entry_points = { \"console_scripts\": ['makeReact = makeReact.script:main'] }, version", "to speed-up their develoment process.\", long_description=long_description, long_description_content_type=\"text/markdown\", author = \"<NAME>\", author_email = \"<EMAIL>\",", "setup( name = \"makeReact\", packages = [\"makeReact\"], entry_points = { \"console_scripts\": ['makeReact =", "import re from setuptools import setup version = re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1)", "= re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1) with open(\"README.md\", \"r\") as f: long_description =", "= \"makeReact is a python package which helps react and react-native developer to", "version = re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1) with open(\"README.md\", \"r\") as f: long_description", "}, version = version, description = \"makeReact is a python package which helps", "description = \"makeReact is a python package which helps react and react-native developer", "which helps react and react-native developer to speed-up their develoment process.\", long_description=long_description, long_description_content_type=\"text/markdown\",", "open(\"README.md\", \"r\") as f: long_description = f.read() setup( name = \"makeReact\", packages =", "= version, description = \"makeReact is a python package which helps react and", "= [\"makeReact\"], entry_points = { \"console_scripts\": ['makeReact = makeReact.script:main'] }, version = version,", "re.M ).group(1) with open(\"README.md\", \"r\") as f: long_description = 
f.read() setup( name =", "[\"makeReact\"], entry_points = { \"console_scripts\": ['makeReact = makeReact.script:main'] }, version = version, description", "= makeReact.script:main'] }, version = version, description = \"makeReact is a python package", "= { \"console_scripts\": ['makeReact = makeReact.script:main'] }, version = version, description = \"makeReact", "react and react-native developer to speed-up their develoment process.\", long_description=long_description, long_description_content_type=\"text/markdown\", author =", "version, description = \"makeReact is a python package which helps react and react-native", "<gh_stars>0 import re from setuptools import setup version = re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M", "as f: long_description = f.read() setup( name = \"makeReact\", packages = [\"makeReact\"], entry_points", "entry_points = { \"console_scripts\": ['makeReact = makeReact.script:main'] }, version = version, description =", "and react-native developer to speed-up their develoment process.\", long_description=long_description, long_description_content_type=\"text/markdown\", author = \"<NAME>\",", "packages = [\"makeReact\"], entry_points = { \"console_scripts\": ['makeReact = makeReact.script:main'] }, version =", "\"console_scripts\": ['makeReact = makeReact.script:main'] }, version = version, description = \"makeReact is a", "makeReact.script:main'] }, version = version, description = \"makeReact is a python package which", "= f.read() setup( name = \"makeReact\", packages = [\"makeReact\"], entry_points = { \"console_scripts\":", ").group(1) with open(\"README.md\", \"r\") as f: long_description = f.read() setup( name = \"makeReact\",", "package which helps react and react-native developer to speed-up their develoment process.\", long_description=long_description,", "with open(\"README.md\", \"r\") as f: long_description = f.read() setup( name = \"makeReact\", packages", "long_description = f.read() 
setup( name = \"makeReact\", packages = [\"makeReact\"], entry_points = {", "developer to speed-up their develoment process.\", long_description=long_description, long_description_content_type=\"text/markdown\", author = \"<NAME>\", author_email =", "a python package which helps react and react-native developer to speed-up their develoment", "setuptools import setup version = re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1) with open(\"README.md\", \"r\")", "python package which helps react and react-native developer to speed-up their develoment process.\",", "helps react and react-native developer to speed-up their develoment process.\", long_description=long_description, long_description_content_type=\"text/markdown\", author", "is a python package which helps react and react-native developer to speed-up their", "{ \"console_scripts\": ['makeReact = makeReact.script:main'] }, version = version, description = \"makeReact is", "\"r\") as f: long_description = f.read() setup( name = \"makeReact\", packages = [\"makeReact\"],", "from setuptools import setup version = re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1) with open(\"README.md\",", "setup version = re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1) with open(\"README.md\", \"r\") as f:", "f.read() setup( name = \"makeReact\", packages = [\"makeReact\"], entry_points = { \"console_scripts\": ['makeReact", "['makeReact = makeReact.script:main'] }, version = version, description = \"makeReact is a python", "\"makeReact is a python package which helps react and react-native developer to speed-up", "speed-up their develoment process.\", long_description=long_description, long_description_content_type=\"text/markdown\", author = \"<NAME>\", author_email = \"<EMAIL>\", )", "re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1) with 
open(\"README.md\", \"r\") as f: long_description = f.read()", "re from setuptools import setup version = re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1) with", "= \"makeReact\", packages = [\"makeReact\"], entry_points = { \"console_scripts\": ['makeReact = makeReact.script:main'] },", "open('makeReact/script.py').read(), re.M ).group(1) with open(\"README.md\", \"r\") as f: long_description = f.read() setup( name", "name = \"makeReact\", packages = [\"makeReact\"], entry_points = { \"console_scripts\": ['makeReact = makeReact.script:main']", "'^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1) with open(\"README.md\", \"r\") as f: long_description = f.read() setup(", "version = version, description = \"makeReact is a python package which helps react", "import setup version = re.search( '^__version__\\s*=\\s*\"(.*)\"', open('makeReact/script.py').read(), re.M ).group(1) with open(\"README.md\", \"r\") as" ]
[ "entry in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self): '''validate `orig_digest` against files''' for entry", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "self.assertItemsIn(changed_files, targets, \"all modified files should have patches in \" \"pdar\") self.assertEqual(len(changed_files), len(targets),", "`orig_digest` does not ever match `dest_digest`''' for entry in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest) def", "language governing permissions and # limitations under the License. import unittest2 import tests", "disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive = self.load_pdarchive() @property def", "in self.pdarchive.patches: path = os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type, 'orig hash mismatch:", "in \" \"pdar\") self.assertItemsIn(changed_files, targets, \"all modified files should have patches in \"", "self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self): '''validate `orig_digest` against files''' for entry in self.pdarchive.patches: path", "entry.dest_digest) def test_0003_digest_orig(self): '''validate `orig_digest` against files''' for entry in self.pdarchive.patches: path =", "len(targets), \"number of modified items sholud match \" \"number of patches in pdar\")", "% (entry.target, entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self): '''validate `dest_digest` against files''' for entry in", "ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure pdar file was written to disk''' 
self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase):", "for entry in self.pdarchive.patches: path = os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type, 'dest", "for each entry''' self.assertItemsEqual( [entry.orig_digest for entry in self.loaded_pdarchive.patches], [entry.orig_digest for entry in", "def test_0003_digest_dest(self): '''validate `dest_digest` against files''' for entry in self.pdarchive.patches: path = os.path.join(self.mod_dir,", "this file except in compliance with the License. # You may obtain a", "def test_0003_digest_orig(self): '''validate `orig_digest` against files''' for entry in self.pdarchive.patches: path = os.path.join(self.orig_dir,", "entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self): '''validate `dest_digest` against files''' for entry in self.pdarchive.patches: path", "destination dataset ''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure pdar file was written", "specific language governing permissions and # limitations under the License. 
import unittest2 import", "self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare `orig_digest` values for each entry''' self.assertItemsEqual( [entry.orig_digest for entry", "path, entry.dest_digest, entry.hash_type, 'dest hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__)))", "str(entry.__dict__))) def test_0003_digest_dest(self): '''validate `dest_digest` against files''' for entry in self.pdarchive.patches: path =", "entry in self.pdarchive.patches: path = os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type, 'dest hash", "test_0001_basics(self): '''Ensure pdar file was loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self): '''Compare number", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "on dataset''' targets = [patch.target for patch in self.pdarchive.patches] changed_files = self.changed_files self.assertItemsNotIn(self.same_files,", "pdar file and validate results - clone original dataset - apply loaded pdar", "ANY KIND, either express or implied. 
# See the License for the specific", "against files''' for entry in self.pdarchive.patches: path = os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path, entry.dest_digest,", "entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply in memory pdar and validate results - clone", "test_0002_count(self): '''Compare number of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare `target` values for", "in self.pdarchive.patches: path = os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type, 'dest hash mismatch:", "loaded pdar file to cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.loaded_pdarchive)", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "entry''' self.assertItemsEqual( [entry.target for entry in self.loaded_pdarchive.patches], [entry.target for entry in self.pdarchive.patches]) def", "entry.target) self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type, 'orig hash mismatch: %s (%s): %s' % (entry.target,", "entry.dest_digest, entry.hash_type, 'dest hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def", "class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure PDArchive was created, and contains patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0)", "(entry.target, entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply in memory pdar and validate results -", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "self.pdarchive.patches: path = os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type, 'orig hash mismatch: %s", "dataset - filecmp.cmpfiles against destination dataset ''' 
self._test_apply_pdarchive(self.loaded_pdarchive) if __name__ == \"__main__\": tests.main()", "targets based on dataset''' targets = [patch.target for patch in self.pdarchive.patches] changed_files =", "OF ANY KIND, either express or implied. # See the License for the", "os import random import shutil from pkg_resources import parse_version class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self):", "loaded pdar file to cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.pdarchive)", "file is part of pdar. # # Copyright 2011 <NAME> # # Licensed", "return self._loaded_pdarchive def test_0001_basics(self): '''Ensure pdar file was loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def", "import shutil from pkg_resources import parse_version class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure PDArchive was", "in memory pdar and validate results - clone original dataset - apply loaded", "files should have patches in \" \"pdar\") self.assertEqual(len(changed_files), len(targets), \"number of modified items", "in self.loaded_pdarchive.patches], [entry.orig_digest for entry in self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare `dest_digest` values for", "permissions and # limitations under the License. 
import unittest2 import tests import pdar", "entry in self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare `orig_digest` values for each entry''' self.assertItemsEqual( [entry.orig_digest", "(%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self): '''validate `dest_digest` against files''' for", "self.load_pdarchive() @property def loaded_pdarchive(self): return self._loaded_pdarchive def test_0001_basics(self): '''Ensure pdar file was loaded'''", "in self.loaded_pdarchive.patches], [entry.target for entry in self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare `orig_digest` values for", "entry.hash_type, 'dest hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self):", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "in \" \"pdar\") self.assertEqual(len(changed_files), len(targets), \"number of modified items sholud match \" \"number", "have patches in \" \"pdar\") self.assertEqual(len(changed_files), len(targets), \"number of modified items sholud match", "entry''' self.assertItemsEqual( [entry.orig_digest for entry in self.loaded_pdarchive.patches], [entry.orig_digest for entry in self.pdarchive.patches]) def", "'''ensure pdar file was written to disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "%s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self): '''validate `dest_digest` against files''' for entry", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "def test_0002_targets(self): '''validate correct targets 
based on dataset''' targets = [patch.target for patch", "'''validate `orig_digest` does not ever match `dest_digest`''' for entry in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest)", "'''Compare `dest_digest` values for each entry''' self.assertItemsEqual( [entry.dest_digest for entry in self.loaded_pdarchive.patches], [entry.dest_digest", "match \" \"number of patches in pdar\") def test_0003_digest_values(self): '''validate `orig_digest` does not", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "[entry.target for entry in self.loaded_pdarchive.patches], [entry.target for entry in self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare", "def test_0002_count(self): '''Compare number of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare `target` values", "required by applicable law or agreed to in writing, software # distributed under", "pdar. 
# # Copyright 2011 <NAME> # # Licensed under the Apache License,", "[patch.target for patch in self.pdarchive.patches] changed_files = self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged files should", "applicable law or agreed to in writing, software # distributed under the License", "\" \"number of patches in pdar\") def test_0003_digest_values(self): '''validate `orig_digest` does not ever", "hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply in", "Copyright 2011 <NAME> # # Licensed under the Apache License, Version 2.0 (the", "import random import shutil from pkg_resources import parse_version class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure", "self._loaded_pdarchive def test_0001_basics(self): '''Ensure pdar file was loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self):", "loaded pdar file and validate results - clone original dataset - apply loaded", "test_0003_digest_orig(self): '''validate `orig_digest` against files''' for entry in self.pdarchive.patches: path = os.path.join(self.orig_dir, entry.target)", "or agreed to in writing, software # distributed under the License is distributed", "each entry''' self.assertItemsEqual( [entry.orig_digest for entry in self.loaded_pdarchive.patches], [entry.orig_digest for entry in self.pdarchive.patches])", "and validate results - clone original dataset - apply loaded pdar file to", "'''Compare number of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare `target` values for ecah", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive = self.load_pdarchive() @property def loaded_pdarchive(self):", "def test_0003_targets(self): '''Compare `target` values for ecah entry''' self.assertItemsEqual( [entry.target for entry in", "original dataset - apply loaded pdar file to cloned dataset - filecmp.cmpfiles against", "test_0003_digest_values(self): '''validate `orig_digest` does not ever match `dest_digest`''' for entry in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest,", "path = os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type, 'orig hash mismatch: %s (%s):", "written to disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive = self.load_pdarchive()", "self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self): '''Compare number of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self):", "def setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive = self.load_pdarchive() @property def loaded_pdarchive(self): return self._loaded_pdarchive def", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "\"unchanged files should not have patches in \" \"pdar\") self.assertItemsIn(changed_files, targets, \"all modified", "`target` values for ecah entry''' self.assertItemsEqual( [entry.target for entry in self.loaded_pdarchive.patches], [entry.target for", "You may obtain a copy of the License at # # 
http://www.apache.org/licenses/LICENSE-2.0 #", "License. # You may obtain a copy of the License at # #", "and contains patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate correct targets based on dataset'''", "dataset ''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure pdar file was written to", "match `dest_digest`''' for entry in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self): '''validate `orig_digest` against", "self.loaded_pdarchive.patches], [entry.orig_digest for entry in self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare `dest_digest` values for each", "compliance with the License. # You may obtain a copy of the License", "test_0003_orig_digests(self): '''Compare `orig_digest` values for each entry''' self.assertItemsEqual( [entry.orig_digest for entry in self.loaded_pdarchive.patches],", "test_0004_apply_archive(self): '''Apply in memory pdar and validate results - clone original dataset -", "2011 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "from pkg_resources import parse_version class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure PDArchive was created, and", "pdar file was written to disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest, self).setUp()", "for the specific language governing permissions and # limitations under the License. 
import", "patch in self.pdarchive.patches] changed_files = self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged files should not have", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "import pdar import os import random import shutil from pkg_resources import parse_version class", "License. import unittest2 import tests import pdar import os import random import shutil", "to disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive = self.load_pdarchive() @property", "values for ecah entry''' self.assertItemsEqual( [entry.target for entry in self.loaded_pdarchive.patches], [entry.target for entry", "pdar file to cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.loaded_pdarchive) if", "ever match `dest_digest`''' for entry in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self): '''validate `orig_digest`", "not use this file except in compliance with the License. # You may", "is part of pdar. # # Copyright 2011 <NAME> # # Licensed under", "modified items sholud match \" \"number of patches in pdar\") def test_0003_digest_values(self): '''validate", "items sholud match \" \"number of patches in pdar\") def test_0003_digest_values(self): '''validate `orig_digest`", "License, Version 2.0 (the \"License\"); # you may not use this file except", "of pdar. 
# # Copyright 2011 <NAME> # # Licensed under the Apache", "pdar.PDArchive)) def test_0002_count(self): '''Compare number of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare `target`", "import tests import pdar import os import random import shutil from pkg_resources import", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "'dest hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply", "loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self): '''Compare number of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def", "self).setUp() self._loaded_pdarchive = self.load_pdarchive() @property def loaded_pdarchive(self): return self._loaded_pdarchive def test_0001_basics(self): '''Ensure pdar", "in self.pdarchive.patches] changed_files = self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged files should not have patches", "to cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.loaded_pdarchive) if __name__ ==", "self.loaded_pdarchive.patches], [entry.dest_digest for entry in self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply loaded pdar file and", "def test_0001_basics(self): '''ensure PDArchive was created, and contains patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self):", "files should not have patches in \" \"pdar\") self.assertItemsIn(changed_files, targets, \"all modified files", "- clone original dataset - apply loaded pdar file to cloned dataset -", "[entry.target for entry in self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare `orig_digest` values for each entry'''", 
"# you may not use this file except in compliance with the License.", "def test_0004_apply_archive(self): '''Apply loaded pdar file and validate results - clone original dataset", "filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure pdar file", "results - clone original dataset - apply loaded pdar file to cloned dataset", "\"number of modified items sholud match \" \"number of patches in pdar\") def", "agreed to in writing, software # distributed under the License is distributed on", "does not ever match `dest_digest`''' for entry in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self):", "''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure pdar file was written to disk'''", "the specific language governing permissions and # limitations under the License. 
import unittest2", "dataset''' targets = [patch.target for patch in self.pdarchive.patches] changed_files = self.changed_files self.assertItemsNotIn(self.same_files, targets,", "for patch in self.pdarchive.patches] changed_files = self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged files should not", "(the \"License\"); # you may not use this file except in compliance with", "'''Compare `target` values for ecah entry''' self.assertItemsEqual( [entry.target for entry in self.loaded_pdarchive.patches], [entry.target", "test_0004_apply_archive(self): '''Apply loaded pdar file and validate results - clone original dataset -", "unittest2 import tests import pdar import os import random import shutil from pkg_resources", "pkg_resources import parse_version class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure PDArchive was created, and contains", "# Unless required by applicable law or agreed to in writing, software #", "targets, \"all modified files should have patches in \" \"pdar\") self.assertEqual(len(changed_files), len(targets), \"number", "have patches in \" \"pdar\") self.assertItemsIn(changed_files, targets, \"all modified files should have patches", "entry in self.loaded_pdarchive.patches], [entry.dest_digest for entry in self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply loaded pdar", "by applicable law or agreed to in writing, software # distributed under the", "for entry in self.loaded_pdarchive.patches], [entry.orig_digest for entry in self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare `dest_digest`", "def test_0001_basics(self): '''ensure pdar file was written to disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "values for each entry''' self.assertItemsEqual( [entry.orig_digest for entry in 
self.loaded_pdarchive.patches], [entry.orig_digest for entry", "for entry in self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare `dest_digest` values for each entry''' self.assertItemsEqual(", "self.assertItemsEqual( [entry.dest_digest for entry in self.loaded_pdarchive.patches], [entry.dest_digest for entry in self.pdarchive.patches]) def test_0004_apply_archive(self):", "self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self): '''Compare number of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare", "entry.hash_type, 'orig hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self):", "(entry.target, entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self): '''validate `dest_digest` against files''' for entry in self.pdarchive.patches:", "(%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply in memory pdar and", "should have patches in \" \"pdar\") self.assertEqual(len(changed_files), len(targets), \"number of modified items sholud", "self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type, 'dest hash mismatch: %s (%s): %s' % (entry.target, entry.type_code,", "pdar import os import random import shutil from pkg_resources import parse_version class ArchiveTest(tests.ArchiveTestCase):", "file except in compliance with the License. # You may obtain a copy", "for ecah entry''' self.assertItemsEqual( [entry.target for entry in self.loaded_pdarchive.patches], [entry.target for entry in", "# limitations under the License. 
import unittest2 import tests import pdar import os", "'''validate `orig_digest` against files''' for entry in self.pdarchive.patches: path = os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual(", "entry in self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare `dest_digest` values for each entry''' self.assertItemsEqual( [entry.dest_digest", "License for the specific language governing permissions and # limitations under the License.", "entry in self.loaded_pdarchive.patches], [entry.orig_digest for entry in self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare `dest_digest` values", "to cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def", "to in writing, software # distributed under the License is distributed on an", "dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure", "setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive = self.load_pdarchive() @property def loaded_pdarchive(self): return self._loaded_pdarchive def test_0001_basics(self):", "`orig_digest` against files''' for entry in self.pdarchive.patches: path = os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path,", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "under the License. 
import unittest2 import tests import pdar import os import random", "random import shutil from pkg_resources import parse_version class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure PDArchive", "of patches in pdar\") def test_0003_digest_values(self): '''validate `orig_digest` does not ever match `dest_digest`'''", "self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply loaded pdar file and validate results - clone original", "apply loaded pdar file to cloned dataset - filecmp.cmpfiles against destination dataset '''", "patches in \" \"pdar\") self.assertItemsIn(changed_files, targets, \"all modified files should have patches in", "[entry.dest_digest for entry in self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply loaded pdar file and validate", "or implied. # See the License for the specific language governing permissions and", "%s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply in memory pdar", "os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type, 'dest hash mismatch: %s (%s): %s' %", "of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare `target` values for ecah entry''' self.assertItemsEqual(", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "'''Apply loaded pdar file and validate results - clone original dataset - apply", "'''Apply in memory pdar and validate results - clone original dataset - apply", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "entry.orig_digest, entry.hash_type, 'orig hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def", "for entry in self.loaded_pdarchive.patches], [entry.target for entry in self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare `orig_digest`", "self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare `dest_digest` values for each entry''' self.assertItemsEqual( [entry.dest_digest for entry", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "files''' for entry in self.pdarchive.patches: path = os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type,", "validate results - clone original dataset - apply loaded pdar file to cloned", "%s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self): '''validate `dest_digest` against files'''", "self.assertItemsNotIn(self.same_files, targets, \"unchanged files should not have patches in \" \"pdar\") self.assertItemsIn(changed_files, targets,", "cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.loaded_pdarchive) if __name__ == \"__main__\":", "= self.load_pdarchive() @property def loaded_pdarchive(self): return self._loaded_pdarchive def test_0001_basics(self): '''Ensure pdar file was", "was loaded''' self.assertIsNotNone(self.loaded_pdarchive) 
self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self): '''Compare number of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches))", "targets, \"unchanged files should not have patches in \" \"pdar\") self.assertItemsIn(changed_files, targets, \"all", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "in self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare `orig_digest` values for each entry''' self.assertItemsEqual( [entry.orig_digest for", "should not have patches in \" \"pdar\") self.assertItemsIn(changed_files, targets, \"all modified files should", "you may not use this file except in compliance with the License. #", "changed_files = self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged files should not have patches in \"", "ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure PDArchive was created, and contains patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def", "'''Ensure pdar file was loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self): '''Compare number of", "entry in self.loaded_pdarchive.patches], [entry.target for entry in self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare `orig_digest` values", "# # Copyright 2011 <NAME> # # Licensed under the Apache License, Version", "def test_0003_orig_digests(self): '''Compare `orig_digest` values for each entry''' self.assertItemsEqual( [entry.orig_digest for entry in", "import parse_version class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure PDArchive was created, and contains patches'''", "self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate correct targets based on dataset''' targets 
= [patch.target", "in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self): '''validate `orig_digest` against files''' for entry in", "for each entry''' self.assertItemsEqual( [entry.dest_digest for entry in self.loaded_pdarchive.patches], [entry.dest_digest for entry in", "pdar\") def test_0003_digest_values(self): '''validate `orig_digest` does not ever match `dest_digest`''' for entry in", "pdar file to cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.pdarchive) class", "for entry in self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply loaded pdar file and validate results", "created, and contains patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate correct targets based on", "str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply in memory pdar and validate results - clone original", "`orig_digest` values for each entry''' self.assertItemsEqual( [entry.orig_digest for entry in self.loaded_pdarchive.patches], [entry.orig_digest for", "use this file except in compliance with the License. 
# You may obtain", "\" \"pdar\") self.assertItemsIn(changed_files, targets, \"all modified files should have patches in \" \"pdar\")", "shutil from pkg_resources import parse_version class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure PDArchive was created,", "def test_0003_dest_digests(self): '''Compare `dest_digest` values for each entry''' self.assertItemsEqual( [entry.dest_digest for entry in", "memory pdar and validate results - clone original dataset - apply loaded pdar", "def loaded_pdarchive(self): return self._loaded_pdarchive def test_0001_basics(self): '''Ensure pdar file was loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive,", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "of modified items sholud match \" \"number of patches in pdar\") def test_0003_digest_values(self):", "'''ensure PDArchive was created, and contains patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate correct", "def test_0004_apply_archive(self): '''Apply in memory pdar and validate results - clone original dataset", "file and validate results - clone original dataset - apply loaded pdar file", "self.pdarchive.patches: path = os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type, 'dest hash mismatch: %s", "targets = [patch.target for patch in self.pdarchive.patches] changed_files = self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged", "%s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply in memory pdar and validate", "2.0 (the \"License\"); # you may not use this file except in compliance", "based on dataset''' targets = [patch.target for patch in self.pdarchive.patches] changed_files = self.changed_files", "for entry in self.pdarchive.patches: 
self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self): '''validate `orig_digest` against files''' for", "clone original dataset - apply loaded pdar file to cloned dataset - filecmp.cmpfiles", "'''validate correct targets based on dataset''' targets = [patch.target for patch in self.pdarchive.patches]", "in self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare `dest_digest` values for each entry''' self.assertItemsEqual( [entry.dest_digest for", "ecah entry''' self.assertItemsEqual( [entry.target for entry in self.loaded_pdarchive.patches], [entry.target for entry in self.pdarchive.patches])", "test_0003_dest_digests(self): '''Compare `dest_digest` values for each entry''' self.assertItemsEqual( [entry.dest_digest for entry in self.loaded_pdarchive.patches],", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "test_0002_targets(self): '''validate correct targets based on dataset''' targets = [patch.target for patch in", "= self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged files should not have patches in \" \"pdar\")", "in self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply loaded pdar file and validate results - clone", "self._loaded_pdarchive = self.load_pdarchive() @property def loaded_pdarchive(self): return self._loaded_pdarchive def test_0001_basics(self): '''Ensure pdar file", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. # See the License for the specific language governing permissions", "file to cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.loaded_pdarchive) if __name__", "parse_version class ArchiveTest(tests.ArchiveTestCase): def test_0001_basics(self): '''ensure PDArchive was created, and contains patches''' self.assertIsNotNone(self.pdarchive)", "either express or implied. 
# See the License for the specific language governing", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "[entry.orig_digest for entry in self.loaded_pdarchive.patches], [entry.orig_digest for entry in self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare", "test_0001_basics(self): '''ensure pdar file was written to disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self):", "self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self): '''validate `orig_digest` against files''' for entry in self.pdarchive.patches:", "number of entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare `target` values for ecah entry'''", "\"all modified files should have patches in \" \"pdar\") self.assertEqual(len(changed_files), len(targets), \"number of", "patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate correct targets based on dataset''' targets =", "the License. # You may obtain a copy of the License at #", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "limitations under the License. 
import unittest2 import tests import pdar import os import", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "against destination dataset ''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure pdar file was", "entry in self.pdarchive.patches: path = os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type, 'orig hash", "self.assertItemsEqual( [entry.target for entry in self.loaded_pdarchive.patches], [entry.target for entry in self.pdarchive.patches]) def test_0003_orig_digests(self):", "path, entry.orig_digest, entry.hash_type, 'orig hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__)))", "\"number of patches in pdar\") def test_0003_digest_values(self): '''validate `orig_digest` does not ever match", "pdar and validate results - clone original dataset - apply loaded pdar file", "len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare `target` values for ecah entry''' self.assertItemsEqual( [entry.target for entry", "self.assertEqual(len(changed_files), len(targets), \"number of modified items sholud match \" \"number of patches in", "'''validate `dest_digest` against files''' for entry in self.pdarchive.patches: path = os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual(", "with the License. # You may obtain a copy of the License at", "file was written to disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive", "hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self): '''validate `dest_digest`", "<reponame>jpenney/pdar<gh_stars>1-10 # This file is part of pdar. 
# # Copyright 2011 <NAME>", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "values for each entry''' self.assertItemsEqual( [entry.dest_digest for entry in self.loaded_pdarchive.patches], [entry.dest_digest for entry", "entry in self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply loaded pdar file and validate results -", "loaded_pdarchive(self): return self._loaded_pdarchive def test_0001_basics(self): '''Ensure pdar file was loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive))", "part of pdar. # # Copyright 2011 <NAME> # # Licensed under the", "law or agreed to in writing, software # distributed under the License is", "and # limitations under the License. import unittest2 import tests import pdar import", "for entry in self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare `orig_digest` values for each entry''' self.assertItemsEqual(", "the License for the specific language governing permissions and # limitations under the", "def test_0003_digest_values(self): '''validate `orig_digest` does not ever match `dest_digest`''' for entry in self.pdarchive.patches:", "- apply loaded pdar file to cloned dataset - filecmp.cmpfiles against destination dataset", "sholud match \" \"number of patches in pdar\") def test_0003_digest_values(self): '''validate `orig_digest` does", "entries''' self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare `target` values for ecah entry''' self.assertItemsEqual( [entry.target", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged files should not have patches in \" \"pdar\") self.assertItemsIn(changed_files,", "entry.target) self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type, 'dest hash mismatch: %s (%s): %s' % (entry.target,", 
"governing permissions and # limitations under the License. import unittest2 import tests import", "self.assertItemsEqual( [entry.orig_digest for entry in self.loaded_pdarchive.patches], [entry.orig_digest for entry in self.pdarchive.patches]) def test_0003_dest_digests(self):", "def test_0001_basics(self): '''Ensure pdar file was loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self): '''Compare", "% (entry.target, entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply in memory pdar and validate results", "file to cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase):", "= os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type, 'dest hash mismatch: %s (%s): %s'", "self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate correct targets based on dataset''' targets = [patch.target for", "- filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure pdar", "LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive = self.load_pdarchive() @property def loaded_pdarchive(self): return self._loaded_pdarchive", "not have patches in \" \"pdar\") self.assertItemsIn(changed_files, targets, \"all modified files should have", "in compliance with the License. 
# You may obtain a copy of the", "patches in pdar\") def test_0003_digest_values(self): '''validate `orig_digest` does not ever match `dest_digest`''' for", "'orig hash mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self): '''validate", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"pdar\") self.assertEqual(len(changed_files), len(targets), \"number of modified items sholud match \" \"number of patches", "= [patch.target for patch in self.pdarchive.patches] changed_files = self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged files", "test_0003_digest_dest(self): '''validate `dest_digest` against files''' for entry in self.pdarchive.patches: path = os.path.join(self.mod_dir, entry.target)", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "import unittest2 import tests import pdar import os import random import shutil from", "mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0003_digest_dest(self): '''validate `dest_digest` against", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "modified files should have patches in \" \"pdar\") self.assertEqual(len(changed_files), len(targets), \"number of modified", "entry''' self.assertItemsEqual( [entry.dest_digest for entry in self.loaded_pdarchive.patches], [entry.dest_digest for entry in self.pdarchive.patches]) def", "tests import pdar import os import random import shutil from pkg_resources 
import parse_version", "files''' for entry in self.pdarchive.patches: path = os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type,", "dataset - apply loaded pdar file to cloned dataset - filecmp.cmpfiles against destination", "was written to disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive =", "[entry.orig_digest for entry in self.pdarchive.patches]) def test_0003_dest_digests(self): '''Compare `dest_digest` values for each entry'''", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure pdar file was written to disk''' self.assertTrue(os.path.exists(self.pdarchive_path)) class", "self.loaded_pdarchive.patches], [entry.target for entry in self.pdarchive.patches]) def test_0003_orig_digests(self): '''Compare `orig_digest` values for each", "# This file is part of pdar. # # Copyright 2011 <NAME> #", "'''Compare `orig_digest` values for each entry''' self.assertItemsEqual( [entry.orig_digest for entry in self.loaded_pdarchive.patches], [entry.orig_digest", "`dest_digest`''' for entry in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self): '''validate `orig_digest` against files'''", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "This file is part of pdar. 
# # Copyright 2011 <NAME> # #", "\"pdar\") self.assertItemsIn(changed_files, targets, \"all modified files should have patches in \" \"pdar\") self.assertEqual(len(changed_files),", "\" \"pdar\") self.assertEqual(len(changed_files), len(targets), \"number of modified items sholud match \" \"number of", "self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self): '''ensure pdar file was written to disk''' self.assertTrue(os.path.exists(self.pdarchive_path))", "not ever match `dest_digest`''' for entry in self.pdarchive.patches: self.assertNotEqual(entry.orig_digest, entry.dest_digest) def test_0003_digest_orig(self): '''validate", "the License. import unittest2 import tests import pdar import os import random import", "correct targets based on dataset''' targets = [patch.target for patch in self.pdarchive.patches] changed_files", "`dest_digest` against files''' for entry in self.pdarchive.patches: path = os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path,", "self.assertEqual(len(self.pdarchive.patches), len(self.loaded_pdarchive.patches)) def test_0003_targets(self): '''Compare `target` values for ecah entry''' self.assertItemsEqual( [entry.target for", "for entry in self.pdarchive.patches: path = os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type, 'orig", "in self.loaded_pdarchive.patches], [entry.dest_digest for entry in self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply loaded pdar file", "test_0003_targets(self): '''Compare `target` values for ecah entry''' self.assertItemsEqual( [entry.target for entry in self.loaded_pdarchive.patches],", "for entry in self.loaded_pdarchive.patches], [entry.dest_digest for entry in self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply loaded", "was created, and contains patches''' self.assertIsNotNone(self.pdarchive) 
self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate correct targets based", "import os import random import shutil from pkg_resources import parse_version class ArchiveTest(tests.ArchiveTestCase): def", "against files''' for entry in self.pdarchive.patches: path = os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path, entry.orig_digest,", "file was loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self): '''Compare number of entries''' self.assertEqual(len(self.pdarchive.patches),", "class LoadedArchiveFileTest(tests.ArchiveFileTestCase): def setUp(self): super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive = self.load_pdarchive() @property def loaded_pdarchive(self): return", "each entry''' self.assertItemsEqual( [entry.dest_digest for entry in self.loaded_pdarchive.patches], [entry.dest_digest for entry in self.pdarchive.patches])", "`dest_digest` values for each entry''' self.assertItemsEqual( [entry.dest_digest for entry in self.loaded_pdarchive.patches], [entry.dest_digest for", "pdar file was loaded''' self.assertIsNotNone(self.loaded_pdarchive) self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive)) def test_0002_count(self): '''Compare number of entries'''", "# Copyright 2011 <NAME> # # Licensed under the Apache License, Version 2.0", "patches in \" \"pdar\") self.assertEqual(len(changed_files), len(targets), \"number of modified items sholud match \"", "contains patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate correct targets based on dataset''' targets", "@property def loaded_pdarchive(self): return self._loaded_pdarchive def test_0001_basics(self): '''Ensure pdar file was loaded''' self.assertIsNotNone(self.loaded_pdarchive)", "[entry.dest_digest for entry in self.loaded_pdarchive.patches], 
[entry.dest_digest for entry in self.pdarchive.patches]) def test_0004_apply_archive(self): '''Apply", "in pdar\") def test_0003_digest_values(self): '''validate `orig_digest` does not ever match `dest_digest`''' for entry", "test_0001_basics(self): '''ensure PDArchive was created, and contains patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate", "= os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type, 'orig hash mismatch: %s (%s): %s'", "super(LoadedArchiveFileTest, self).setUp() self._loaded_pdarchive = self.load_pdarchive() @property def loaded_pdarchive(self): return self._loaded_pdarchive def test_0001_basics(self): '''Ensure", "self.pdarchive.patches] changed_files = self.changed_files self.assertItemsNotIn(self.same_files, targets, \"unchanged files should not have patches in", "path = os.path.join(self.mod_dir, entry.target) self.assertFileHashEqual( path, entry.dest_digest, entry.hash_type, 'dest hash mismatch: %s (%s):", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "PDArchive was created, and contains patches''' self.assertIsNotNone(self.pdarchive) self.assertGreater(len(self.pdarchive.patches),0) def test_0002_targets(self): '''validate correct targets", "mismatch: %s (%s): %s' % (entry.target, entry.type_code, str(entry.__dict__))) def test_0004_apply_archive(self): '''Apply in memory", "self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type, 'orig hash mismatch: %s (%s): %s' % (entry.target, entry.type_code,", "os.path.join(self.orig_dir, entry.target) self.assertFileHashEqual( path, entry.orig_digest, entry.hash_type, 'orig hash mismatch: %s (%s): %s' %", "cloned dataset - filecmp.cmpfiles against destination dataset ''' self._test_apply_pdarchive(self.pdarchive) class ArchiveFileTest(tests.ArchiveFileTestCase): def test_0001_basics(self):" ]
[ "\"\"\" if not intervals: return [] intervals = sorted(intervals) res = [intervals[0]] for", "Solution(object): def XXX(self, intervals): \"\"\" 轻松秒杀,根据结果集合最后一个和新的元素之间关系判断是否要修改 \"\"\" if not intervals: return [] intervals", "in range(1, len(intervals)): if intervals[i][0] <= res[-1][1]: res[-1] = [res[-1][0],max(res[-1][1], intervals[i][1])] else: res.append(intervals[i])", "\"\"\" 轻松秒杀,根据结果集合最后一个和新的元素之间关系判断是否要修改 \"\"\" if not intervals: return [] intervals = sorted(intervals) res =", "class Solution(object): def XXX(self, intervals): \"\"\" 轻松秒杀,根据结果集合最后一个和新的元素之间关系判断是否要修改 \"\"\" if not intervals: return []", "return [] intervals = sorted(intervals) res = [intervals[0]] for i in range(1, len(intervals)):", "intervals): \"\"\" 轻松秒杀,根据结果集合最后一个和新的元素之间关系判断是否要修改 \"\"\" if not intervals: return [] intervals = sorted(intervals) res", "[intervals[0]] for i in range(1, len(intervals)): if intervals[i][0] <= res[-1][1]: res[-1] = [res[-1][0],max(res[-1][1],", "sorted(intervals) res = [intervals[0]] for i in range(1, len(intervals)): if intervals[i][0] <= res[-1][1]:", "res = [intervals[0]] for i in range(1, len(intervals)): if intervals[i][0] <= res[-1][1]: res[-1]", "= sorted(intervals) res = [intervals[0]] for i in range(1, len(intervals)): if intervals[i][0] <=", "i in range(1, len(intervals)): if intervals[i][0] <= res[-1][1]: res[-1] = [res[-1][0],max(res[-1][1], intervals[i][1])] else:", "XXX(self, intervals): \"\"\" 轻松秒杀,根据结果集合最后一个和新的元素之间关系判断是否要修改 \"\"\" if not intervals: return [] intervals = sorted(intervals)", "def XXX(self, intervals): \"\"\" 轻松秒杀,根据结果集合最后一个和新的元素之间关系判断是否要修改 \"\"\" if not intervals: return [] intervals =", "len(intervals)): if intervals[i][0] <= res[-1][1]: res[-1] = [res[-1][0],max(res[-1][1], intervals[i][1])] else: res.append(intervals[i]) return res", "轻松秒杀,根据结果集合最后一个和新的元素之间关系判断是否要修改 \"\"\" if not intervals: return [] intervals = sorted(intervals) res = [intervals[0]]", "if not intervals: return [] intervals = sorted(intervals) res = 
[intervals[0]] for i", "[] intervals = sorted(intervals) res = [intervals[0]] for i in range(1, len(intervals)): if", "for i in range(1, len(intervals)): if intervals[i][0] <= res[-1][1]: res[-1] = [res[-1][0],max(res[-1][1], intervals[i][1])]", "range(1, len(intervals)): if intervals[i][0] <= res[-1][1]: res[-1] = [res[-1][0],max(res[-1][1], intervals[i][1])] else: res.append(intervals[i]) return", "intervals = sorted(intervals) res = [intervals[0]] for i in range(1, len(intervals)): if intervals[i][0]", "= [intervals[0]] for i in range(1, len(intervals)): if intervals[i][0] <= res[-1][1]: res[-1] =", "intervals: return [] intervals = sorted(intervals) res = [intervals[0]] for i in range(1,", "not intervals: return [] intervals = sorted(intervals) res = [intervals[0]] for i in" ]
[ "reading_utils def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp: return json.loads(fp.read()) data_path =", "sess = tf.Session() end = sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value =", "fp: return json.loads(fp.read()) data_path = \"/tmp/WaterDrop\" metadata = _read_metadata(data_path) ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')])", "as tf from learning_to_simulate import reading_utils def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt') as", "with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp: return json.loads(fp.read()) data_path = \"/tmp/WaterDrop\" metadata =", "'rt') as fp: return json.loads(fp.read()) data_path = \"/tmp/WaterDrop\" metadata = _read_metadata(data_path) ds =", "import tensorflow as tf from learning_to_simulate import reading_utils def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'),", "= tf.Session() end = sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value = []", "json import os import tensorflow as tf from learning_to_simulate import reading_utils def _read_metadata(data_path):", "import functools import json import os import tensorflow as tf from learning_to_simulate import", "open(os.path.join(data_path, 'metadata.json'), 'rt') as fp: return json.loads(fp.read()) data_path = \"/tmp/WaterDrop\" metadata = _read_metadata(data_path)", "'test.tfrecord')]) ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata)) n = ds.make_one_shot_iterator().get_next() sess = tf.Session() end", "metadata=metadata)) n = ds.make_one_shot_iterator().get_next() sess = tf.Session() end = sum(1 for _ in", "_read_metadata(data_path) ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds = ds.map(functools.partial( 
reading_utils.parse_serialized_simulation_example, metadata=metadata)) n = ds.make_one_shot_iterator().get_next()", "learning_to_simulate import reading_utils def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp: return json.loads(fp.read())", "import os import tensorflow as tf from learning_to_simulate import reading_utils def _read_metadata(data_path): with", "def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp: return json.loads(fp.read()) data_path = \"/tmp/WaterDrop\"", "json.loads(fp.read()) data_path = \"/tmp/WaterDrop\" metadata = _read_metadata(data_path) ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds =", "os import tensorflow as tf from learning_to_simulate import reading_utils def _read_metadata(data_path): with open(os.path.join(data_path,", "ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata)) n = ds.make_one_shot_iterator().get_next() sess = tf.Session() end =", "ds.make_one_shot_iterator().get_next() sess = tf.Session() end = sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value", "metadata = _read_metadata(data_path) ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata)) n", "import json import os import tensorflow as tf from learning_to_simulate import reading_utils def", "as fp: return json.loads(fp.read()) data_path = \"/tmp/WaterDrop\" metadata = _read_metadata(data_path) ds = tf.data.TFRecordDataset([os.path.join(data_path,", "'metadata.json'), 'rt') as fp: return json.loads(fp.read()) data_path = \"/tmp/WaterDrop\" metadata = _read_metadata(data_path) ds", "ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, 
metadata=metadata)) n = ds.make_one_shot_iterator().get_next() sess", "tf.Session() end = sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value = [] for", "return json.loads(fp.read()) data_path = \"/tmp/WaterDrop\" metadata = _read_metadata(data_path) ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds", "tensorflow as tf from learning_to_simulate import reading_utils def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt')", "data_path = \"/tmp/WaterDrop\" metadata = _read_metadata(data_path) ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds = ds.map(functools.partial(", "\"/tmp/WaterDrop\" metadata = _read_metadata(data_path) ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata))", "= _read_metadata(data_path) ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata)) n =", "'test.tfrecord'))) value = [] for i in range(0, end): print(str(i)) v = sess.run(n)", "= tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata)) n = ds.make_one_shot_iterator().get_next() sess =", "for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value = [] for i in range(0, end):", "<reponame>abdelabdalla/deepmind-research import functools import json import os import tensorflow as tf from learning_to_simulate", "tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata)) n = ds.make_one_shot_iterator().get_next() sess = tf.Session()", "end = sum(1 for _ in 
tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value = [] for i", "_read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp: return json.loads(fp.read()) data_path = \"/tmp/WaterDrop\" metadata", "_ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value = [] for i in range(0, end): print(str(i))", "tf from learning_to_simulate import reading_utils def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp:", "functools import json import os import tensorflow as tf from learning_to_simulate import reading_utils", "from learning_to_simulate import reading_utils def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp: return", "reading_utils.parse_serialized_simulation_example, metadata=metadata)) n = ds.make_one_shot_iterator().get_next() sess = tf.Session() end = sum(1 for _", "n = ds.make_one_shot_iterator().get_next() sess = tf.Session() end = sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path,", "value = [] for i in range(0, end): print(str(i)) v = sess.run(n) value.append(v)", "ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata)) n = ds.make_one_shot_iterator().get_next() sess = tf.Session() end = sum(1 for", "= ds.make_one_shot_iterator().get_next() sess = tf.Session() end = sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord')))", "sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value = [] for i in range(0,", "import reading_utils def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp: return json.loads(fp.read()) data_path", "in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value = [] for i in range(0, end): print(str(i)) v", "= \"/tmp/WaterDrop\" metadata = _read_metadata(data_path) ds = 
tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')]) ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example,", "tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value = [] for i in range(0, end): print(str(i)) v =", "= ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata)) n = ds.make_one_shot_iterator().get_next() sess = tf.Session() end = sum(1", "= sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord'))) value = [] for i in" ]
[ "model_path = 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model = load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if not isinstance(model, RevivedModel):", "not load model\") if model._training_config is None: raise RuntimeError(\"Model _training_config is None\") model.compile(", "raise RuntimeError(\"Can not load model\") if model._training_config is None: raise RuntimeError(\"Model _training_config is", "loader_cls=KerasObjectLoader) if not isinstance(model, RevivedModel): raise RuntimeError(\"Can not load model\") if model._training_config is", "saving_utils from tensorflow.python.saved_model import loader_impl model_path = 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model = load_internal(model_path, tags=['serve'],", "from tensorflow.python.keras.saving.saved_model.load import KerasObjectLoader from tensorflow.python.saved_model.load import load_internal from tensorflow.python.keras.saving.saved_model.load import RevivedModel from", "tensorflow.python.keras.saving.saved_model.load import KerasObjectLoader from tensorflow.python.saved_model.load import load_internal from tensorflow.python.keras.saving.saved_model.load import RevivedModel from tensorflow.python.keras.saving", "tensorflow as tf from tensorflow.python.keras.saving.saved_model.load import KerasObjectLoader from tensorflow.python.saved_model.load import load_internal from tensorflow.python.keras.saving.saved_model.load", "tensorflow.python.saved_model import loader_impl model_path = 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model = load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if", "from tensorflow.python.keras.saving.saved_model.load import RevivedModel from tensorflow.python.keras.saving import saving_utils from tensorflow.python.saved_model import loader_impl model_path", "not isinstance(model, RevivedModel): raise RuntimeError(\"Can not load 
model\") if model._training_config is None: raise", "RuntimeError(\"Model _training_config is None\") model.compile( **saving_utils.compile_args_from_training_config(model._training_config)) test_data = [[], [], [], []] model.predict(test_data)", "= load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if not isinstance(model, RevivedModel): raise RuntimeError(\"Can not load model\")", "RevivedModel): raise RuntimeError(\"Can not load model\") if model._training_config is None: raise RuntimeError(\"Model _training_config", "tensorflow.python.saved_model.load import load_internal from tensorflow.python.keras.saving.saved_model.load import RevivedModel from tensorflow.python.keras.saving import saving_utils from tensorflow.python.saved_model", "raise RuntimeError(\"Model _training_config is None\") model.compile( **saving_utils.compile_args_from_training_config(model._training_config)) test_data = [[], [], [], []]", "from tensorflow.python.saved_model import loader_impl model_path = 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model = load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader)", "loader_impl model_path = 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model = load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if not isinstance(model,", "import KerasObjectLoader from tensorflow.python.saved_model.load import load_internal from tensorflow.python.keras.saving.saved_model.load import RevivedModel from tensorflow.python.keras.saving import", "if not isinstance(model, RevivedModel): raise RuntimeError(\"Can not load model\") if model._training_config is None:", "if model._training_config is None: raise RuntimeError(\"Model _training_config is None\") model.compile( **saving_utils.compile_args_from_training_config(model._training_config)) test_data =", "RevivedModel from tensorflow.python.keras.saving import saving_utils from 
tensorflow.python.saved_model import loader_impl model_path = 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path)", "model = load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if not isinstance(model, RevivedModel): raise RuntimeError(\"Can not load", "import loader_impl model_path = 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model = load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if not", "import load_internal from tensorflow.python.keras.saving.saved_model.load import RevivedModel from tensorflow.python.keras.saving import saving_utils from tensorflow.python.saved_model import", "<reponame>oushu1zhangxiangxuan1/HolmesNER # import tensorflow as tf from tensorflow.python.keras.saving.saved_model.load import KerasObjectLoader from tensorflow.python.saved_model.load import", "model\") if model._training_config is None: raise RuntimeError(\"Model _training_config is None\") model.compile( **saving_utils.compile_args_from_training_config(model._training_config)) test_data", "load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if not isinstance(model, RevivedModel): raise RuntimeError(\"Can not load model\") if", "tensorflow.python.keras.saving.saved_model.load import RevivedModel from tensorflow.python.keras.saving import saving_utils from tensorflow.python.saved_model import loader_impl model_path =", "loader_impl.parse_saved_model(model_path) model = load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if not isinstance(model, RevivedModel): raise RuntimeError(\"Can not", "tf from tensorflow.python.keras.saving.saved_model.load import KerasObjectLoader from tensorflow.python.saved_model.load import load_internal from tensorflow.python.keras.saving.saved_model.load import RevivedModel", "from tensorflow.python.keras.saving import saving_utils from tensorflow.python.saved_model import loader_impl model_path = 
'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model", "from tensorflow.python.saved_model.load import load_internal from tensorflow.python.keras.saving.saved_model.load import RevivedModel from tensorflow.python.keras.saving import saving_utils from", "tensorflow.python.keras.saving import saving_utils from tensorflow.python.saved_model import loader_impl model_path = 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model =", "model._training_config is None: raise RuntimeError(\"Model _training_config is None\") model.compile( **saving_utils.compile_args_from_training_config(model._training_config)) test_data = [[],", "load model\") if model._training_config is None: raise RuntimeError(\"Model _training_config is None\") model.compile( **saving_utils.compile_args_from_training_config(model._training_config))", "None: raise RuntimeError(\"Model _training_config is None\") model.compile( **saving_utils.compile_args_from_training_config(model._training_config)) test_data = [[], [], [],", "is None: raise RuntimeError(\"Model _training_config is None\") model.compile( **saving_utils.compile_args_from_training_config(model._training_config)) test_data = [[], [],", "import saving_utils from tensorflow.python.saved_model import loader_impl model_path = 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model = load_internal(model_path,", "KerasObjectLoader from tensorflow.python.saved_model.load import load_internal from tensorflow.python.keras.saving.saved_model.load import RevivedModel from tensorflow.python.keras.saving import saving_utils", "isinstance(model, RevivedModel): raise RuntimeError(\"Can not load model\") if model._training_config is None: raise RuntimeError(\"Model", "load_internal from tensorflow.python.keras.saving.saved_model.load import RevivedModel from tensorflow.python.keras.saving import saving_utils from tensorflow.python.saved_model import loader_impl", "as tf 
from tensorflow.python.keras.saving.saved_model.load import KerasObjectLoader from tensorflow.python.saved_model.load import load_internal from tensorflow.python.keras.saving.saved_model.load import", "import RevivedModel from tensorflow.python.keras.saving import saving_utils from tensorflow.python.saved_model import loader_impl model_path = 'output/saved_model/cls/1599723701'", "'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model = load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if not isinstance(model, RevivedModel): raise RuntimeError(\"Can", "tags=['serve'], loader_cls=KerasObjectLoader) if not isinstance(model, RevivedModel): raise RuntimeError(\"Can not load model\") if model._training_config", "import tensorflow as tf from tensorflow.python.keras.saving.saved_model.load import KerasObjectLoader from tensorflow.python.saved_model.load import load_internal from", "# import tensorflow as tf from tensorflow.python.keras.saving.saved_model.load import KerasObjectLoader from tensorflow.python.saved_model.load import load_internal", "= 'output/saved_model/cls/1599723701' loader_impl.parse_saved_model(model_path) model = load_internal(model_path, tags=['serve'], loader_cls=KerasObjectLoader) if not isinstance(model, RevivedModel): raise", "RuntimeError(\"Can not load model\") if model._training_config is None: raise RuntimeError(\"Model _training_config is None\")" ]
[ "export the actions to the readme \"\"\" ), epilog=textwrap.dedent( \"\"\" # Update files", "= FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.sort() readme = Path(README) md_document", "actions.csv.\" ) args = parser.parse_args() return args def update_files_from_csv(): print(f\"Updating files in the", "print(f\"Updating files in the /actions folder from actions.csv...\") df = pd.read_csv(CSV) actions =", "files in action folder $ python update.py --files-cleanup # Update actions.csv based on", "update.py --files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update data.csv based on", "epilog=textwrap.dedent( \"\"\" # Update files in action folder $ python update.py --files-cleanup #", "Actions from utils.markdown import ( update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument, ) from utils.files import", "= Actions.read_from_files(files) actions.to_files() def update_csv_from_files(): print(f\"Updating actions.csv from files in the /actions folder...\")", "This script is used to: - clean up files under /actions - export", "Update actions.csv based on files $ python update.py --files-to-csv # Update README.md based", "folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.sort() readme =", "df = actions.to_df() df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating README.md from files in the /actions", "- export the actions to the readme \"\"\" ), epilog=textwrap.dedent( \"\"\" # Update", "Action, Actions from utils.markdown import ( update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument, ) from utils.files", "from utils.markdown import ( update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument, ) from utils.files import FileClient", "SUMMARY_ID, MarkdownData, MarkdownDocument, ) from utils.files import FileClient README 
= Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir,", "folder $ python update.py --files-cleanup # Update actions.csv based on files $ python", "os.pardir, \"actions.csv\")) ) def _get_parser(): parser = argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This script is", "actions.to_files() def update_files(): print(f\"Updating files in the /actions folder...\") fc = FileClient() files", "def update_files(): print(f\"Updating files in the /actions folder...\") fc = FileClient() files =", "the action folder.\" ) parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update the table in the README.md", "action folder from the actions.csv.\" ) args = parser.parse_args() return args def update_files_from_csv():", ") from utils.files import FileClient README = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) ) CSV", "the /actions folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) df", "pandas as pd from pathlib import Path from utils.action import Action, Actions from", "the /actions folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.sort()", "_get_parser() if args.files_cleanup: update_files() if args.files_to_csv: update_csv_from_files() if args.files_to_readme: update_readme_from_files() if args.csv_to_files: update_files_from_csv()", "cleaning it up and sorting it.\" ) parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update the action", "= Actions.read_from_files(files) df = actions.to_df() df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating README.md from files in", "if __name__ == \"__main__\": args = _get_parser() if args.files_cleanup: update_files() if args.files_to_csv: update_csv_from_files()", "Path from utils.action import Action, Actions from utils.markdown import ( update_markdown_document, SUMMARY_ID, MarkdownData,", 
"action=\"store_true\", help=\"Update data.csv based on the action folder.\" ) parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update", "Update files in action folder $ python update.py --files-cleanup # Update actions.csv based", "--files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update data.csv based on the", "\"--files-to-csv\", action=\"store_true\", help=\"Update data.csv based on the action folder.\" ) parser.add_argument( \"--files-to-readme\", action=\"store_true\",", "help=\"Update data.csv based on the action folder.\" ) parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update the", "actions.to_df() df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating README.md from files in the /actions folder...\") fc", "description=textwrap.dedent( \"\"\" This script is used to: - clean up files under /actions", "), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update data.csv based on the action folder.\"", "Path(README) md_document = readme.read_text() md_document = update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document) if __name__ ==", "fc.get_all_files() actions = Actions.read_from_files(files) actions.sort() readme = Path(README) md_document = readme.read_text() md_document =", "/actions folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) df =", "$ python update.py --files-to-csv # Update README.md based on files $ python update.py", ") CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\")) ) def _get_parser(): parser = argparse.ArgumentParser(", "utils.files import FileClient README = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) ) CSV = Path(", 
"MarkdownDocument, ) from utils.files import FileClient README = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) )", "= update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document) if __name__ == \"__main__\": args = _get_parser() if", "def update_files_from_csv(): print(f\"Updating files in the /actions folder from actions.csv...\") df = pd.read_csv(CSV)", "files = fc.get_all_files() actions = Actions.read_from_files(files) actions.to_files() def update_csv_from_files(): print(f\"Updating actions.csv from files", "FileClient README = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) ) CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir,", "python update.py --files-cleanup # Update actions.csv based on files $ python update.py --files-to-csv", "parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update the table in the README.md based on the action", "actions = Actions.read_from_files(files) actions.to_files() def update_csv_from_files(): print(f\"Updating actions.csv from files in the /actions", "action=\"store_true\", help=\"Update the action folder by cleaning it up and sorting it.\" )", "based on files $ python update.py --files-to-csv # Update README.md based on files", "help=\"Update the action folder by cleaning it up and sorting it.\" ) parser.add_argument(", "/actions folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.to_files() def", "Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\")) ) def _get_parser(): parser = argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This", "as pd from pathlib import Path from utils.action import Action, Actions from utils.markdown", "update.py --files-cleanup # Update actions.csv based on files $ python update.py --files-to-csv #", "data.csv based on the 
action folder.\" ) parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update the table", "update_files(): print(f\"Updating files in the /actions folder...\") fc = FileClient() files = fc.get_all_files()", "= readme.read_text() md_document = update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document) if __name__ == \"__main__\": args", "args = parser.parse_args() return args def update_files_from_csv(): print(f\"Updating files in the /actions folder", "print(f\"Updating actions.csv from files in the /actions folder...\") fc = FileClient() files =", "formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update data.csv based on the action folder.\" )", "( update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument, ) from utils.files import FileClient README = Path(", "the table in the README.md based on the action folder.\" ) parser.add_argument( \"--files-cleanup\",", "files in the /actions folder...\") fc = FileClient() files = fc.get_all_files() actions =", "actions to the readme \"\"\" ), epilog=textwrap.dedent( \"\"\" # Update files in action", "readme.read_text() md_document = update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document) if __name__ == \"__main__\": args =", "pd.read_csv(CSV) actions = Actions.read_from_df(df) actions.to_files() def update_files(): print(f\"Updating files in the /actions folder...\")", "in the README.md based on the action folder.\" ) parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update", "), epilog=textwrap.dedent( \"\"\" # Update files in action folder $ python update.py --files-cleanup", "/actions folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.sort() readme", "pd from pathlib import Path from utils.action import Action, Actions from utils.markdown import", 
"action=\"store_true\", help=\"Update the action folder from the actions.csv.\" ) args = parser.parse_args() return", "on files $ python update.py --files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\", action=\"store_true\",", "it.\" ) parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update the action folder from the actions.csv.\" )", "script is used to: - clean up files under /actions - export the", "parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update data.csv based on the action folder.\" ) parser.add_argument( \"--files-to-readme\",", "README.md from files in the /actions folder...\") fc = FileClient() files = fc.get_all_files()", "\"__main__\": args = _get_parser() if args.files_cleanup: update_files() if args.files_to_csv: update_csv_from_files() if args.files_to_readme: update_readme_from_files()", "# Update README.md based on files $ python update.py --files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter,", "folder from the actions.csv.\" ) args = parser.parse_args() return args def update_files_from_csv(): print(f\"Updating", "the actions.csv.\" ) args = parser.parse_args() return args def update_files_from_csv(): print(f\"Updating files in", "= _get_parser() if args.files_cleanup: update_files() if args.files_to_csv: update_csv_from_files() if args.files_to_readme: update_readme_from_files() if args.csv_to_files:", "the actions to a csv - export the actions to the readme \"\"\"", "Actions.read_from_files(files) actions.to_files() def update_csv_from_files(): print(f\"Updating actions.csv from files in the /actions folder...\") fc", "help=\"Update the action folder from the actions.csv.\" ) args = parser.parse_args() return args", "the readme \"\"\" ), epilog=textwrap.dedent( \"\"\" # Update files in action folder $", "= pd.read_csv(CSV) actions = Actions.read_from_df(df) actions.to_files() def update_files(): 
print(f\"Updating files in the /actions", "df = pd.read_csv(CSV) actions = Actions.read_from_df(df) actions.to_files() def update_files(): print(f\"Updating files in the", "md_document = readme.read_text() md_document = update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document) if __name__ == \"__main__\":", "CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\")) ) def _get_parser(): parser = argparse.ArgumentParser( description=textwrap.dedent(", "update.py --files-to-csv # Update README.md based on files $ python update.py --files-to-readme \"\"\"", "readme.write_text(md_document) if __name__ == \"__main__\": args = _get_parser() if args.files_cleanup: update_files() if args.files_to_csv:", "= FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.to_files() def update_csv_from_files(): print(f\"Updating actions.csv", "files = fc.get_all_files() actions = Actions.read_from_files(files) df = actions.to_df() df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating", "# Update actions.csv based on files $ python update.py --files-to-csv # Update README.md", "/actions folder from actions.csv...\") df = pd.read_csv(CSV) actions = Actions.read_from_df(df) actions.to_files() def update_files():", "export the actions to a csv - export the actions to the readme", "folder by cleaning it up and sorting it.\" ) parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update", "the /actions folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.to_files()", "files $ python update.py --files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update", "utils.markdown import ( update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument, ) from utils.files import FileClient README", "Update 
README.md based on files $ python update.py --files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, )", "README.md based on files $ python update.py --files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument(", "= parser.parse_args() return args def update_files_from_csv(): print(f\"Updating files in the /actions folder from", "args def update_files_from_csv(): print(f\"Updating files in the /actions folder from actions.csv...\") df =", "update_csv_from_files(): print(f\"Updating actions.csv from files in the /actions folder...\") fc = FileClient() files", "help=\"Update the table in the README.md based on the action folder.\" ) parser.add_argument(", "action folder.\" ) parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update the table in the README.md based", "the action folder.\" ) parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update the action folder by cleaning", "return args def update_files_from_csv(): print(f\"Updating files in the /actions folder from actions.csv...\") df", "= fc.get_all_files() actions = Actions.read_from_files(files) actions.sort() readme = Path(README) md_document = readme.read_text() md_document", "from actions.csv...\") df = pd.read_csv(CSV) actions = Actions.read_from_df(df) actions.to_files() def update_files(): print(f\"Updating files", "from utils.files import FileClient README = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) ) CSV =", "Actions.read_from_df(df) actions.to_files() def update_files(): print(f\"Updating files in the /actions folder...\") fc = FileClient()", "= Path(README) md_document = readme.read_text() md_document = update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document) if __name__", "os import textwrap import argparse import pandas as pd from pathlib import Path", "== \"__main__\": args = _get_parser() if 
args.files_cleanup: update_files() if args.files_to_csv: update_csv_from_files() if args.files_to_readme:", ") parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update the table in the README.md based on the", "actions = Actions.read_from_files(files) df = actions.to_df() df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating README.md from files", "fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.to_files() def update_csv_from_files(): print(f\"Updating", "actions) readme.write_text(md_document) if __name__ == \"__main__\": args = _get_parser() if args.files_cleanup: update_files() if", "from pathlib import Path from utils.action import Action, Actions from utils.markdown import (", "actions = Actions.read_from_files(files) actions.sort() readme = Path(README) md_document = readme.read_text() md_document = update_markdown_document(md_document,", "to: - clean up files under /actions - export the actions to a", "used to: - clean up files under /actions - export the actions to", "Actions.read_from_files(files) actions.sort() readme = Path(README) md_document = readme.read_text() md_document = update_markdown_document(md_document, Actions.action_id, actions)", "on files $ python update.py --files-to-csv # Update README.md based on files $", "actions.csv based on files $ python update.py --files-to-csv # Update README.md based on", "\"\"\" ), epilog=textwrap.dedent( \"\"\" # Update files in action folder $ python update.py", "actions = Actions.read_from_df(df) actions.to_files() def update_files(): print(f\"Updating files in the /actions folder...\") fc", "import textwrap import argparse import pandas as pd from pathlib import Path from", "FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.to_files() def update_csv_from_files(): print(f\"Updating actions.csv from", "update_readme_from_files(): print(f\"Updating README.md from files in the /actions 
folder...\") fc = FileClient() files", "\"--files-to-readme\", action=\"store_true\", help=\"Update the table in the README.md based on the action folder.\"", "files $ python update.py --files-to-csv # Update README.md based on files $ python", "\"\"\" This script is used to: - clean up files under /actions -", "files = fc.get_all_files() actions = Actions.read_from_files(files) actions.sort() readme = Path(README) md_document = readme.read_text()", "= Actions.read_from_files(files) actions.sort() readme = Path(README) md_document = readme.read_text() md_document = update_markdown_document(md_document, Actions.action_id,", "folder from actions.csv...\") df = pd.read_csv(CSV) actions = Actions.read_from_df(df) actions.to_files() def update_files(): print(f\"Updating", "= fc.get_all_files() actions = Actions.read_from_files(files) actions.to_files() def update_csv_from_files(): print(f\"Updating actions.csv from files in", "from utils.action import Action, Actions from utils.markdown import ( update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument,", "to the readme \"\"\" ), epilog=textwrap.dedent( \"\"\" # Update files in action folder", "sorting it.\" ) parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update the action folder from the actions.csv.\"", "actions to a csv - export the actions to the readme \"\"\" ),", "it up and sorting it.\" ) parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update the action folder", "def update_csv_from_files(): print(f\"Updating actions.csv from files in the /actions folder...\") fc = FileClient()", "FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.sort() readme = Path(README) md_document =", "parser = argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This script is used to: - clean up", "os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\")) ) def _get_parser(): parser = 
argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This script", "argparse import pandas as pd from pathlib import Path from utils.action import Action,", "import Action, Actions from utils.markdown import ( update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument, ) from", "Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) ) CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\")) ) def", "\"actions.csv\")) ) def _get_parser(): parser = argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This script is used", "and sorting it.\" ) parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update the action folder from the", "the /actions folder from actions.csv...\") df = pd.read_csv(CSV) actions = Actions.read_from_df(df) actions.to_files() def", "\"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update data.csv based on the action", "parser.parse_args() return args def update_files_from_csv(): print(f\"Updating files in the /actions folder from actions.csv...\")", "actions.sort() readme = Path(README) md_document = readme.read_text() md_document = update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document)", "parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update the action folder from the actions.csv.\" ) args =", "README = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) ) CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\"))", "fc.get_all_files() actions = Actions.read_from_files(files) actions.to_files() def update_csv_from_files(): print(f\"Updating actions.csv from files in the", "files in the /actions folder from actions.csv...\") df = pd.read_csv(CSV) actions = Actions.read_from_df(df)", "__name__ == 
\"__main__\": args = _get_parser() if args.files_cleanup: update_files() if args.files_to_csv: update_csv_from_files() if", "README.md based on the action folder.\" ) parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update the action", "based on the action folder.\" ) parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update the table in", "print(f\"Updating files in the /actions folder...\") fc = FileClient() files = fc.get_all_files() actions", "Actions.action_id, actions) readme.write_text(md_document) if __name__ == \"__main__\": args = _get_parser() if args.files_cleanup: update_files()", "FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) df = actions.to_df() df.to_csv(CSV) def update_readme_from_files():", "argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This script is used to: - clean up files under", "_get_parser(): parser = argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This script is used to: - clean", "action folder.\" ) parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update the action folder by cleaning it", "\"README.md\")) ) CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\")) ) def _get_parser(): parser =", "$ python update.py --files-cleanup # Update actions.csv based on files $ python update.py", "import pandas as pd from pathlib import Path from utils.action import Action, Actions", "= actions.to_df() df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating README.md from files in the /actions folder...\")", "/actions - export the actions to a csv - export the actions to", "import os import textwrap import argparse import pandas as pd from pathlib import", "= Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) ) CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\")) )", ") args = 
parser.parse_args() return args def update_files_from_csv(): print(f\"Updating files in the /actions", "fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.sort() readme = Path(README)", "action folder $ python update.py --files-cleanup # Update actions.csv based on files $", "up files under /actions - export the actions to a csv - export", "action=\"store_true\", help=\"Update the table in the README.md based on the action folder.\" )", "based on the action folder.\" ) parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update the action folder", "= argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This script is used to: - clean up files", "\"--csv-to-files\", action=\"store_true\", help=\"Update the action folder from the actions.csv.\" ) args = parser.parse_args()", "df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating README.md from files in the /actions folder...\") fc =", "folder.\" ) parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update the table in the README.md based on", "by cleaning it up and sorting it.\" ) parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update the", "actions.to_files() def update_csv_from_files(): print(f\"Updating actions.csv from files in the /actions folder...\") fc =", "fc.get_all_files() actions = Actions.read_from_files(files) df = actions.to_df() df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating README.md from", "import ( update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument, ) from utils.files import FileClient README =", "from files in the /actions folder...\") fc = FileClient() files = fc.get_all_files() actions", "import Path from utils.action import Action, Actions from utils.markdown import ( update_markdown_document, SUMMARY_ID,", "= Actions.read_from_df(df) actions.to_files() def update_files(): print(f\"Updating files in the /actions folder...\") fc =", 
"actions.csv from files in the /actions folder...\") fc = FileClient() files = fc.get_all_files()", "based on files $ python update.py --files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\",", "is used to: - clean up files under /actions - export the actions", "- export the actions to a csv - export the actions to the", "print(f\"Updating README.md from files in the /actions folder...\") fc = FileClient() files =", "in the /actions folder from actions.csv...\") df = pd.read_csv(CSV) actions = Actions.read_from_df(df) actions.to_files()", "def update_readme_from_files(): print(f\"Updating README.md from files in the /actions folder...\") fc = FileClient()", "up and sorting it.\" ) parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update the action folder from", ") parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update the action folder by cleaning it up and", "utils.action import Action, Actions from utils.markdown import ( update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument, )", "readme = Path(README) md_document = readme.read_text() md_document = update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document) if", "os.pardir, \"README.md\")) ) CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\")) ) def _get_parser(): parser", "readme \"\"\" ), epilog=textwrap.dedent( \"\"\" # Update files in action folder $ python", "a csv - export the actions to the readme \"\"\" ), epilog=textwrap.dedent( \"\"\"", "the action folder from the actions.csv.\" ) args = parser.parse_args() return args def", "update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document) if __name__ == \"__main__\": args = _get_parser() if args.files_cleanup:", "clean up files under /actions - export the actions to a csv -", "on the action folder.\" ) 
parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update the action folder by", "update_files_from_csv(): print(f\"Updating files in the /actions folder from actions.csv...\") df = pd.read_csv(CSV) actions", "folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) df = actions.to_df()", "fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) df = actions.to_df() df.to_csv(CSV)", "--files-cleanup # Update actions.csv based on files $ python update.py --files-to-csv # Update", "MarkdownData, MarkdownDocument, ) from utils.files import FileClient README = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\"))", "md_document = update_markdown_document(md_document, Actions.action_id, actions) readme.write_text(md_document) if __name__ == \"__main__\": args = _get_parser()", "in the /actions folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files)", "import FileClient README = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) ) CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__),", "import argparse import pandas as pd from pathlib import Path from utils.action import", "update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument, ) from utils.files import FileClient README = Path( os.path.realpath(os.path.join(os.path.abspath(__file__),", "os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"README.md\")) ) CSV = Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, \"actions.csv\")) ) def _get_parser():", "- clean up files under /actions - export the actions to a csv", "files under /actions - export the actions to a csv - export the", "csv - export the actions to the readme \"\"\" ), epilog=textwrap.dedent( \"\"\" #", "= Path( os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, 
\"actions.csv\")) ) def _get_parser(): parser = argparse.ArgumentParser( description=textwrap.dedent( \"\"\"", "# Update files in action folder $ python update.py --files-cleanup # Update actions.csv", "python update.py --files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update data.csv based", "--files-to-csv # Update README.md based on files $ python update.py --files-to-readme \"\"\" ),", ") def _get_parser(): parser = argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This script is used to:", "folder.\" ) parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update the action folder by cleaning it up", "textwrap import argparse import pandas as pd from pathlib import Path from utils.action", "action folder by cleaning it up and sorting it.\" ) parser.add_argument( \"--csv-to-files\", action=\"store_true\",", "\"--files-cleanup\", action=\"store_true\", help=\"Update the action folder by cleaning it up and sorting it.\"", "= FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) df = actions.to_df() df.to_csv(CSV) def", "args = _get_parser() if args.files_cleanup: update_files() if args.files_to_csv: update_csv_from_files() if args.files_to_readme: update_readme_from_files() if", "parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update the action folder by cleaning it up and sorting", "on the action folder.\" ) parser.add_argument( \"--files-to-readme\", action=\"store_true\", help=\"Update the table in the", ") parser.add_argument( \"--csv-to-files\", action=\"store_true\", help=\"Update the action folder from the actions.csv.\" ) args", "in action folder $ python update.py --files-cleanup # Update actions.csv based on files", "Actions.read_from_files(files) df = actions.to_df() df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating README.md from files in the", "\"\"\" # 
Update files in action folder $ python update.py --files-cleanup # Update", "$ python update.py --files-to-readme \"\"\" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update data.csv", "actions.csv...\") df = pd.read_csv(CSV) actions = Actions.read_from_df(df) actions.to_files() def update_files(): print(f\"Updating files in", "from the actions.csv.\" ) args = parser.parse_args() return args def update_files_from_csv(): print(f\"Updating files", "folder...\") fc = FileClient() files = fc.get_all_files() actions = Actions.read_from_files(files) actions.to_files() def update_csv_from_files():", "pathlib import Path from utils.action import Action, Actions from utils.markdown import ( update_markdown_document,", "to a csv - export the actions to the readme \"\"\" ), epilog=textwrap.dedent(", "table in the README.md based on the action folder.\" ) parser.add_argument( \"--files-cleanup\", action=\"store_true\",", "def _get_parser(): parser = argparse.ArgumentParser( description=textwrap.dedent( \"\"\" This script is used to: -", "the README.md based on the action folder.\" ) parser.add_argument( \"--files-cleanup\", action=\"store_true\", help=\"Update the", "python update.py --files-to-csv # Update README.md based on files $ python update.py --files-to-readme", "the actions to the readme \"\"\" ), epilog=textwrap.dedent( \"\"\" # Update files in", "the action folder by cleaning it up and sorting it.\" ) parser.add_argument( \"--csv-to-files\",", ") parser.add_argument( \"--files-to-csv\", action=\"store_true\", help=\"Update data.csv based on the action folder.\" ) parser.add_argument(", "= fc.get_all_files() actions = Actions.read_from_files(files) df = actions.to_df() df.to_csv(CSV) def update_readme_from_files(): print(f\"Updating README.md", "under /actions - export the actions to a csv - export the actions" ]
[ "cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB # Draw the image # Take the", "cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def brightness(img,", "ax_sat = plt.axes([0.25, .03, 0.50, 0.02]) ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02]) #", "sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1) exp_slider = Slider(ax_exp, 'Brightness', -10, 10,", "cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def brightness(img, exp_adj): imghsv =", "s*satadj s = np.clip(s,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume:", "= cv.split(imghsv) s = s*satadj s = np.clip(s,0,255) imghsv = cv.merge([h,s,v]) imgrgb =", "change. Under it converts to greyscale # and about 1.5 is immensely high", "2,figsize=(27.0,27.0)) ax1 = ax[0] # The histogram ax2 = ax[1] # The image", "= cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() # call update function", "image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16) # Calculate the initial value for the image", "= v*exp_adj v = np.clip(v,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) #", "histogram = cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, = ax.plot(histogram, color=color) return plt_handle def main(): fig, ax", "'r') for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while", "matplotlib.animation as animation from matplotlib.widgets import Slider import cv2 as cv FILE_NAME =", "ax[1] # The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16) # Calculate the initial value", "s = np.clip(s,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return", 
"saturate(newimg, val) newimg = brightness(newimg, exp_slider.val) imobj.set_data(newimg) # update also the histogram colors", "= cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def", "cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) v = v*exp_adj v = np.clip(v,0,255) imghsv", "Slider(ax_sat, 'Saturation', 0, 20, valinit=1) exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1) #", "cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def plt_hist(ax,", "saturate(newimg, sat_slider.val) newimg = brightness(newimg, val) imobj.set_data(newimg) # update also the histogram colors", "cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def brightness(img, exp_adj): imghsv = cv.cvtColor(img,", "# 1.0 means no change. Under it converts to greyscale # and about", "numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from matplotlib.widgets", "greyscale # and about 1.5 is immensely high def saturate(img, satadj): imghsv =", "fig.canvas.draw_idle() # call update function on slider value change sat_slider.on_changed(update_sat) exp_slider.on_changed(update_exp) plt.show() main()", "import matplotlib.pyplot as plt import matplotlib.animation as animation from matplotlib.widgets import Slider import", "val) imobj.set_data(newimg) # update also the histogram colors = ('b', 'g', 'r') for", "matplotlib.pyplot as plt import matplotlib.animation as animation from matplotlib.widgets import Slider import cv2", "= s*satadj s = np.clip(s,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) #", "img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB # Draw the image #", "np.clip(v,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return", "in enumerate(colors): histogram = 
cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() #", "in enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color) lines.append(line) def update_sat(val): newimg =", "# Axes for the saturation and brightness ax_sat = plt.axes([0.25, .03, 0.50, 0.02])", "ax2 = ax[1] # The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16) # Calculate the", "lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() # call update function on slider", "fontsize=16) # Calculate the initial value for the image img = cv.imread(cv.samples.findFile(FILE_NAME)) #", "FILE_NAME = 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: # image in rbg", "lines.append(line) def update_sat(val): newimg = img # update image newimg = saturate(newimg, val)", "colors = ('r', 'g', 'b') for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram)", "update image newimg = saturate(newimg, val) newimg = brightness(newimg, exp_slider.val) imobj.set_data(newimg) # update", "for the saturation and brightness ax_sat = plt.axes([0.25, .03, 0.50, 0.02]) ax_exp =", "= plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 = ax[0] # The histogram ax2 = ax[1] #", "= brightness(newimg, exp_slider.val) imobj.set_data(newimg) # update also the histogram colors = ('r', 'g',", "('b', 'g', 'r') for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw", "cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) v = v*exp_adj v = np.clip(v,0,255)", "RGB # Draw the image # Take the handle for later imobj =", "# assume: BGR img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes 
RGB # Draw", "exp_adj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) v = v*exp_adj", "= cv.split(imghsv) v = v*exp_adj v = np.clip(v,0,255) imghsv = cv.merge([h,s,v]) imgrgb =", "saturate(img, satadj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) s =", "redraw canvas while idle fig.canvas.draw_idle() # call update function on slider value change", "while idle fig.canvas.draw_idle() # call update function on slider value change sat_slider.on_changed(update_sat) exp_slider.on_changed(update_exp)", "= 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: # image in rbg #", "= ax[1] # The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16) # Calculate the initial", "means no change. Under it converts to greyscale # and about 1.5 is", "fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 = ax[0] # The histogram ax2 =", "matplotlib.widgets import Slider import cv2 as cv FILE_NAME = 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html #", "imobj = ax2.imshow(img) # Axes for the saturation and brightness ax_sat = plt.axes([0.25,", "also the histogram colors = ('b', 'g', 'r') for k,color in enumerate(colors): histogram", "# Take the handle for later imobj = ax2.imshow(img) # Axes for the", "= ax[0] # The histogram ax2 = ax[1] # The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image", "cv.COLOR_BGR2RGB) # plt assumes RGB # Draw the image # Take the handle", "0.01, 0.50, 0.02]) # Slider sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1) exp_slider", "imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def plt_hist(ax, img,", "# Slider sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1) exp_slider = Slider(ax_exp, 'Brightness',", 
"# https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: # image in rbg # # satadj: # 1.0", "also the histogram colors = ('r', 'g', 'b') for k,color in enumerate(colors): histogram", "imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) s = s*satadj s", "satadj: # 1.0 means no change. Under it converts to greyscale # and", "plt_handle, = ax.plot(histogram, color=color) return plt_handle def main(): fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0))", "Axes for the saturation and brightness ax_sat = plt.axes([0.25, .03, 0.50, 0.02]) ax_exp", "enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color) lines.append(line) def update_sat(val): newimg = img", "# # satadj: # 1.0 means no change. Under it converts to greyscale", "for later imobj = ax2.imshow(img) # Axes for the saturation and brightness ax_sat", "(h, s, v) = cv.split(imghsv) v = v*exp_adj v = np.clip(v,0,255) imghsv =", "= ax1.plot(histogram,color=color) lines.append(line) def update_sat(val): newimg = img # update image newimg =", "return rgb return imgrgb def plt_hist(ax, img, color): colors = ['b', 'g', 'r']", "cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB #", "# Histogram colors = ('r', 'g', 'b') lines = [] for k,color in", "canvas while idle fig.canvas.draw_idle() def update_exp(val): newimg = img newimg = saturate(newimg, sat_slider.val)", "histogram = cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color) lines.append(line) def update_sat(val): newimg = img #", "cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) s = s*satadj s = np.clip(s,0,255)", "imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) v = v*exp_adj v", "for the image img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR img = 
cv.cvtColor(img, cv.COLOR_BGR2RGB)", "the handle for later imobj = ax2.imshow(img) # Axes for the saturation and", "k,color in enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color) lines.append(line) def update_sat(val): newimg", "plt.axes([0.25, 0.01, 0.50, 0.02]) # Slider sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1)", "cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def plt_hist(ax, img, color): colors =", "# plt assumes RGB # Draw the image # Take the handle for", "from matplotlib.widgets import Slider import cv2 as cv FILE_NAME = 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html", "plt_handle def main(): fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 = ax[0] # The", "valinit=1) exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1) # Histogram colors = ('r',", "= cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB", "color=color) return plt_handle def main(): fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 = ax[0]", "for k,color in enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color) lines.append(line) def update_sat(val):", "https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: # image in rbg # # satadj: #", "# image in rbg # # satadj: # 1.0 means no change. 
Under", "Slider(ax_exp, 'Brightness', -10, 10, valinit=1) # Histogram colors = ('r', 'g', 'b') lines", "= img newimg = saturate(newimg, sat_slider.val) newimg = brightness(newimg, val) imobj.set_data(newimg) # update", "the image img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR img = cv.cvtColor(img, cv.COLOR_BGR2RGB) #", "# https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: # image in rbg # # satadj:", "# img: # image in rbg # # satadj: # 1.0 means no", "no change. Under it converts to greyscale # and about 1.5 is immensely", "newimg = img # update image newimg = saturate(newimg, val) newimg = brightness(newimg,", "20, valinit=1) exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1) # Histogram colors =", "# Calculate the initial value for the image img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume:", "= np.clip(s,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb", "import Slider import cv2 as cv FILE_NAME = 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting", "= ('b', 'g', 'r') for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) #", "return plt_handle def main(): fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 = ax[0] #", "cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def plt_hist(ax, img, color): colors", "as np import matplotlib.pyplot as plt import matplotlib.animation as animation from matplotlib.widgets import", "main(): fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 = ax[0] # The histogram ax2", "assume: BGR img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB # Draw the", "= ('r', 'g', 'b') for 
k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) #", "update_exp(val): newimg = img newimg = saturate(newimg, sat_slider.val) newimg = brightness(newimg, val) imobj.set_data(newimg)", "The histogram ax2 = ax[1] # The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16) #", "= cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB # Draw the image # Take", "# The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16) # Calculate the initial value for", "cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() def update_exp(val): newimg = img", "# assume: return rgb return imgrgb def brightness(img, exp_adj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\")", "enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() # call", "handle for later imobj = ax2.imshow(img) # Axes for the saturation and brightness", "update also the histogram colors = ('b', 'g', 'r') for k,color in enumerate(colors):", "0.02]) # Slider sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1) exp_slider = Slider(ax_exp,", "saturation and brightness ax_sat = plt.axes([0.25, .03, 0.50, 0.02]) ax_exp = plt.axes([0.25, 0.01,", "ax = plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 = ax[0] # The histogram ax2 = ax[1]", "0.02]) ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02]) # Slider sat_slider = Slider(ax_sat, 'Saturation',", "in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() def", "0.50, 0.02]) ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02]) # Slider sat_slider = Slider(ax_sat,", "cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() # call update 
function on", "https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: # image in rbg # # satadj: # 1.0 means", "imobj.set_data(newimg) # update also the histogram colors = ('b', 'g', 'r') for k,color", "histogram colors = ('b', 'g', 'r') for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256])", "Slider import cv2 as cv FILE_NAME = 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting #", "def update_sat(val): newimg = img # update image newimg = saturate(newimg, val) newimg", "ax.plot(histogram, color=color) return plt_handle def main(): fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 =", "rgb return imgrgb def plt_hist(ax, img, color): colors = ['b', 'g', 'r'] k", "histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() # call update", "'g', 'b') for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas", "s, v) = cv.split(imghsv) v = v*exp_adj v = np.clip(v,0,255) imghsv = cv.merge([h,s,v])", "for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle", "immensely high def saturate(img, satadj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) =", "np import matplotlib.pyplot as plt import matplotlib.animation as animation from matplotlib.widgets import Slider", "is immensely high def saturate(img, satadj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v)", "(h, s, v) = cv.split(imghsv) s = s*satadj s = np.clip(s,0,255) imghsv =", "'Saturation', 0, 20, valinit=1) exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1) # Histogram", "toner', fontsize=16) # 
Calculate the initial value for the image img = cv.imread(cv.samples.findFile(FILE_NAME))", "('r', 'g', 'b') lines = [] for k,color in enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256])", "fig.canvas.draw_idle() def update_exp(val): newimg = img newimg = saturate(newimg, sat_slider.val) newimg = brightness(newimg,", "= cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def plt_hist(ax, img, color):", "lines = [] for k,color in enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color)", "def update_exp(val): newimg = img newimg = saturate(newimg, sat_slider.val) newimg = brightness(newimg, val)", "'b') lines = [] for k,color in enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256]) line, =", "= ax.plot(histogram, color=color) return plt_handle def main(): fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0)) ax1", "satadj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) s = s*satadj", "= Slider(ax_exp, 'Brightness', -10, 10, valinit=1) # Histogram colors = ('r', 'g', 'b')", "# redraw canvas while idle fig.canvas.draw_idle() # call update function on slider value", "# assume: return rgb return imgrgb def plt_hist(ax, img, color): colors = ['b',", "= cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color) lines.append(line) def update_sat(val): newimg = img # update", "'Brightness', -10, 10, valinit=1) # Histogram colors = ('r', 'g', 'b') lines =", "as plt import matplotlib.animation as animation from matplotlib.widgets import Slider import cv2 as", "fig.suptitle('Image toner', fontsize=16) # Calculate the initial value for the image img =", "import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from", "= cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) v = v*exp_adj v =", "# redraw canvas while 
idle fig.canvas.draw_idle() def update_exp(val): newimg = img newimg =", "= [] for k,color in enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color) lines.append(line)", "imgrgb def plt_hist(ax, img, color): colors = ['b', 'g', 'r'] k = colors.index(color)", "= plt.axes([0.25, 0.01, 0.50, 0.02]) # Slider sat_slider = Slider(ax_sat, 'Saturation', 0, 20,", "brightness(img, exp_adj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) v =", "imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb", "Calculate the initial value for the image img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR", "return imgrgb def plt_hist(ax, img, color): colors = ['b', 'g', 'r'] k =", "= cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) s = s*satadj s =", "img, color): colors = ['b', 'g', 'r'] k = colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256])", "animation from matplotlib.widgets import Slider import cv2 as cv FILE_NAME = 'res/mountain-and-lake.jpg' #", "'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: # image in rbg # #", "plt import matplotlib.animation as animation from matplotlib.widgets import Slider import cv2 as cv", "as cv FILE_NAME = 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: # image", "sat_slider.val) newimg = brightness(newimg, val) imobj.set_data(newimg) # update also the histogram colors =", "newimg = brightness(newimg, val) imobj.set_data(newimg) # update also the histogram colors = ('b',", "cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, = 
ax.plot(histogram, color=color) return plt_handle def main(): fig, ax = plt.subplots(1,", "brightness(newimg, exp_slider.val) imobj.set_data(newimg) # update also the histogram colors = ('r', 'g', 'b')", "imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def brightness(img, exp_adj):", "plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 = ax[0] # The histogram ax2 = ax[1] # The", "[] for k,color in enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color) lines.append(line) def", "= ['b', 'g', 'r'] k = colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, = ax.plot(histogram,", "0, 20, valinit=1) exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1) # Histogram colors", "idle fig.canvas.draw_idle() # call update function on slider value change sat_slider.on_changed(update_sat) exp_slider.on_changed(update_exp) plt.show()", "colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, = ax.plot(histogram, color=color) return plt_handle def main(): fig,", "rbg # # satadj: # 1.0 means no change. 
Under it converts to", "# update image newimg = saturate(newimg, val) newimg = brightness(newimg, exp_slider.val) imobj.set_data(newimg) #", "the saturation and brightness ax_sat = plt.axes([0.25, .03, 0.50, 0.02]) ax_exp = plt.axes([0.25,", ".03, 0.50, 0.02]) ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02]) # Slider sat_slider =", "= Slider(ax_sat, 'Saturation', 0, 20, valinit=1) exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1)", "and brightness ax_sat = plt.axes([0.25, .03, 0.50, 0.02]) ax_exp = plt.axes([0.25, 0.01, 0.50,", "'g', 'r'] k = colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, = ax.plot(histogram, color=color) return", "while idle fig.canvas.draw_idle() def update_exp(val): newimg = img newimg = saturate(newimg, sat_slider.val) newimg", "= img # update image newimg = saturate(newimg, val) newimg = brightness(newimg, exp_slider.val)", "brightness(newimg, val) imobj.set_data(newimg) # update also the histogram colors = ('b', 'g', 'r')", "histogram ax2 = ax[1] # The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16) # Calculate", "10, valinit=1) # Histogram colors = ('r', 'g', 'b') lines = [] for", "def saturate(img, satadj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) s", "np.clip(s,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return", "value for the image img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR img = cv.cvtColor(img,", "'b') for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while", "newimg = saturate(newimg, val) newimg = brightness(newimg, exp_slider.val) imobj.set_data(newimg) # update also the", "# The histogram ax2 = ax[1] # The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16)", "'g', 'r') for k,color in enumerate(colors): histogram 
= cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas", "('r', 'g', 'b') for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw", "The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16) # Calculate the initial value for the", "cv2 as cv FILE_NAME = 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: #", "canvas while idle fig.canvas.draw_idle() # call update function on slider value change sat_slider.on_changed(update_sat)", "the histogram colors = ('b', 'g', 'r') for k,color in enumerate(colors): histogram =", "= colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, = ax.plot(histogram, color=color) return plt_handle def main():", "imobj.set_data(newimg) # update also the histogram colors = ('r', 'g', 'b') for k,color", "= saturate(newimg, sat_slider.val) newimg = brightness(newimg, val) imobj.set_data(newimg) # update also the histogram", "enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() def update_exp(val):", "Histogram colors = ('r', 'g', 'b') lines = [] for k,color in enumerate(colors):", "s, v) = cv.split(imghsv) s = s*satadj s = np.clip(s,0,255) imghsv = cv.merge([h,s,v])", "ax[0] # The histogram ax2 = ax[1] # The image ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner',", "return rgb return imgrgb def brightness(img, exp_adj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s,", "histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() def update_exp(val): newimg", "val) newimg = brightness(newimg, exp_slider.val) imobj.set_data(newimg) # update also the 
histogram colors =", "v = np.clip(v,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return", "converts to greyscale # and about 1.5 is immensely high def saturate(img, satadj):", "the initial value for the image img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR img", "= ax2.imshow(img) # Axes for the saturation and brightness ax_sat = plt.axes([0.25, .03,", "= np.clip(v,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb", "exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1) # Histogram colors = ('r', 'g',", "Draw the image # Take the handle for later imobj = ax2.imshow(img) #", "img newimg = saturate(newimg, sat_slider.val) newimg = brightness(newimg, val) imobj.set_data(newimg) # update also", "image img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt", "# update also the histogram colors = ('b', 'g', 'r') for k,color in", "'r'] k = colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, = ax.plot(histogram, color=color) return plt_handle", "ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02]) # Slider sat_slider = Slider(ax_sat, 'Saturation', 0,", "as animation from matplotlib.widgets import Slider import cv2 as cv FILE_NAME = 'res/mountain-and-lake.jpg'", "assumes RGB # Draw the image # Take the handle for later imobj", "ax1 = ax[0] # The histogram ax2 = ax[1] # The image ax2.set_xlim(0.0,1280.0)", "newimg = img newimg = saturate(newimg, sat_slider.val) newimg = brightness(newimg, val) imobj.set_data(newimg) #", "cv.calcHist([img],[k],None,[256],[0,256]) line, = ax1.plot(histogram,color=color) lines.append(line) def update_sat(val): newimg = img # update image", "= cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() def update_exp(val): newimg =", "redraw canvas 
while idle fig.canvas.draw_idle() def update_exp(val): newimg = img newimg = saturate(newimg,", "the histogram colors = ('r', 'g', 'b') for k,color in enumerate(colors): histogram =", "Take the handle for later imobj = ax2.imshow(img) # Axes for the saturation", "img: # image in rbg # # satadj: # 1.0 means no change.", "Under it converts to greyscale # and about 1.5 is immensely high def", "k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle()", "high def saturate(img, satadj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv)", "= ('r', 'g', 'b') lines = [] for k,color in enumerate(colors): histogram =", "in rbg # # satadj: # 1.0 means no change. Under it converts", "newimg = brightness(newimg, exp_slider.val) imobj.set_data(newimg) # update also the histogram colors = ('r',", "-10, 10, valinit=1) # Histogram colors = ('r', 'g', 'b') lines = []", "image # Take the handle for later imobj = ax2.imshow(img) # Axes for", "colors = ['b', 'g', 'r'] k = colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, =", "cv.split(imghsv) s = s*satadj s = np.clip(s,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"),", "1.5 is immensely high def saturate(img, satadj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s,", "initial value for the image img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR img =", "v) = cv.split(imghsv) s = s*satadj s = np.clip(s,0,255) imghsv = cv.merge([h,s,v]) imgrgb", "def main(): fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0)) ax1 = ax[0] # The histogram", "brightness ax_sat = plt.axes([0.25, .03, 0.50, 0.02]) ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02])", "# update also the histogram colors = ('r', 'g', 'b') for k,color in", "colors = ('b', 'g', 'r') for k,color in enumerate(colors): histogram = 
cv.calcHist([newimg],[k],None,[256],[0,256]) lines[k].set_ydata(histogram)", "the image # Take the handle for later imobj = ax2.imshow(img) # Axes", "imgrgb def brightness(img, exp_adj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv)", "plt assumes RGB # Draw the image # Take the handle for later", "colors = ('r', 'g', 'b') lines = [] for k,color in enumerate(colors): histogram", "img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes", "ax2.imshow(img) # Axes for the saturation and brightness ax_sat = plt.axes([0.25, .03, 0.50,", "1.0 means no change. Under it converts to greyscale # and about 1.5", "= cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume: return rgb return imgrgb def brightness(img, exp_adj): imghsv", "# Draw the image # Take the handle for later imobj = ax2.imshow(img)", "v = v*exp_adj v = np.clip(v,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB)", "ax2.set_xlim(0.0,1280.0) fig.suptitle('Image toner', fontsize=16) # Calculate the initial value for the image img", "ax1.plot(histogram,color=color) lines.append(line) def update_sat(val): newimg = img # update image newimg = saturate(newimg,", "= brightness(newimg, val) imobj.set_data(newimg) # update also the histogram colors = ('b', 'g',", "and about 1.5 is immensely high def saturate(img, satadj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\")", "'g', 'b') lines = [] for k,color in enumerate(colors): histogram = cv.calcHist([img],[k],None,[256],[0,256]) line,", "img # update image newimg = saturate(newimg, val) newimg = brightness(newimg, exp_slider.val) imobj.set_data(newimg)", "= cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, = ax.plot(histogram, color=color) return plt_handle def main(): fig, ax =", "to greyscale # and about 1.5 is immensely high def saturate(img, satadj): imghsv", "import 
matplotlib.animation as animation from matplotlib.widgets import Slider import cv2 as cv FILE_NAME", "plt_hist(ax, img, color): colors = ['b', 'g', 'r'] k = colors.index(color) histogram =", "BGR img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB # Draw the image", "about 1.5 is immensely high def saturate(img, satadj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h,", "idle fig.canvas.draw_idle() def update_exp(val): newimg = img newimg = saturate(newimg, sat_slider.val) newimg =", "cv.split(imghsv) v = v*exp_adj v = np.clip(v,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"),", "update also the histogram colors = ('r', 'g', 'b') for k,color in enumerate(colors):", "import cv2 as cv FILE_NAME = 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img:", "v) = cv.split(imghsv) v = v*exp_adj v = np.clip(v,0,255) imghsv = cv.merge([h,s,v]) imgrgb", "later imobj = ax2.imshow(img) # Axes for the saturation and brightness ax_sat =", "0.50, 0.02]) # Slider sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1) exp_slider =", "def brightness(img, exp_adj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) v", "assume: return rgb return imgrgb def plt_hist(ax, img, color): colors = ['b', 'g',", "exp_slider.val) imobj.set_data(newimg) # update also the histogram colors = ('r', 'g', 'b') for", "cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) = cv.split(imghsv) s = s*satadj s = np.clip(s,0,255) imghsv", "# satadj: # 1.0 means no change. 
Under it converts to greyscale #", "return imgrgb def brightness(img, exp_adj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v) =", "v*exp_adj v = np.clip(v,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB) # assume:", "image newimg = saturate(newimg, val) newimg = brightness(newimg, exp_slider.val) imobj.set_data(newimg) # update also", "it converts to greyscale # and about 1.5 is immensely high def saturate(img,", "valinit=1) # Histogram colors = ('r', 'g', 'b') lines = [] for k,color", "s = s*satadj s = np.clip(s,0,255) imghsv = cv.merge([h,s,v]) imgrgb = cv.cvtColor(imghsv.astype(\"uint8\"), cv.COLOR_HSV2RGB)", "rgb return imgrgb def brightness(img, exp_adj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h, s, v)", "lines[k].set_ydata(histogram) # redraw canvas while idle fig.canvas.draw_idle() def update_exp(val): newimg = img newimg", "histogram colors = ('r', 'g', 'b') for k,color in enumerate(colors): histogram = cv.calcHist([newimg],[k],None,[256],[0,256])", "line, = ax1.plot(histogram,color=color) lines.append(line) def update_sat(val): newimg = img # update image newimg", "cv FILE_NAME = 'res/mountain-and-lake.jpg' # https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html # https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting # img: # image in", "k = colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256]) plt_handle, = ax.plot(histogram, color=color) return plt_handle def", "newimg = saturate(newimg, sat_slider.val) newimg = brightness(newimg, val) imobj.set_data(newimg) # update also the", "def plt_hist(ax, img, color): colors = ['b', 'g', 'r'] k = colors.index(color) histogram", "color): colors = ['b', 'g', 'r'] k = colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256]) plt_handle,", "['b', 'g', 'r'] k = colors.index(color) histogram = cv.calcHist([img],[k],None,[256],[0,256]) 
plt_handle, = ax.plot(histogram, color=color)", "image in rbg # # satadj: # 1.0 means no change. Under it", "= saturate(newimg, val) newimg = brightness(newimg, exp_slider.val) imobj.set_data(newimg) # update also the histogram", "assume: return rgb return imgrgb def brightness(img, exp_adj): imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype(\"float32\") (h,", "= plt.axes([0.25, .03, 0.50, 0.02]) ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02]) # Slider", "# and about 1.5 is immensely high def saturate(img, satadj): imghsv = cv.cvtColor(img,", "plt.axes([0.25, .03, 0.50, 0.02]) ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02]) # Slider sat_slider", "update_sat(val): newimg = img # update image newimg = saturate(newimg, val) newimg =", "Slider sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1) exp_slider = Slider(ax_exp, 'Brightness', -10," ]
[ "migrations, models class Migration(migrations.Migration): dependencies = [ ('SV', '0005_auto_20190305_0116'), ] operations = [", "model_name='ticketproducts', name='price', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts',", "migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='ieps', field=models.FloatField(default=0), ), migrations.AlterField(", "class Migration(migrations.Migration): dependencies = [ ('SV', '0005_auto_20190305_0116'), ] operations = [ migrations.RemoveField( model_name='cut',", "name='user', ), migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250, null=True),", "[ migrations.RemoveField( model_name='cut', name='user', ), migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts', name='alias',", "Django 2.0.5 on 2019-03-10 19:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='quantity', field=models.FloatField(default=0),", "migrations.AlterField( model_name='ticketproducts', name='ieps', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='price',", "null=True), ), 
migrations.AlterField( model_name='ticketproducts', name='quantity', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='total', field=models.FloatField(default=0), ), ]", "name='price', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='quantity',", "name='iva', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250,", "max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='quantity', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='total', field=models.FloatField(default=0), ),", "), migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250, null=True), ),", "), migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='ieps', field=models.FloatField(default=0), ),", "model_name='cut', name='user', ), migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250,", "migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField(", 
"max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='ieps', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ),", "), migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='quantity', field=models.FloatField(default=0), ),", "'0005_auto_20190305_0116'), ] operations = [ migrations.RemoveField( model_name='cut', name='user', ), migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1),", "Generated by Django 2.0.5 on 2019-03-10 19:46 from django.db import migrations, models class", "Migration(migrations.Migration): dependencies = [ ('SV', '0005_auto_20190305_0116'), ] operations = [ migrations.RemoveField( model_name='cut', name='user',", "field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='ieps', field=models.FloatField(default=0),", "operations = [ migrations.RemoveField( model_name='cut', name='user', ), migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1), ), migrations.AlterField(", "model_name='ticketproducts', name='ieps', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0),", "dependencies = [ ('SV', '0005_auto_20190305_0116'), ] operations = [ migrations.RemoveField( model_name='cut', name='user', ),", "field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='ieps', 
field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0),", "field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250, null=True),", "), migrations.AlterField( model_name='ticketproducts', name='ieps', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts',", "= [ migrations.RemoveField( model_name='cut', name='user', ), migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts',", "19:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('SV', '0005_auto_20190305_0116'),", "migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='productName',", "2.0.5 on 2019-03-10 19:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "), migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250, null=True), ),", "model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='quantity', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts',", "] operations = [ 
migrations.RemoveField( model_name='cut', name='user', ), migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1), ),", "migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='quantity', field=models.FloatField(default=0), ), migrations.AlterField(", "on 2019-03-10 19:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "by Django 2.0.5 on 2019-03-10 19:46 from django.db import migrations, models class Migration(migrations.Migration):", "('SV', '0005_auto_20190305_0116'), ] operations = [ migrations.RemoveField( model_name='cut', name='user', ), migrations.AlterField( model_name='cut', name='serial',", "name='productName', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='quantity', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='total',", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('SV', '0005_auto_20190305_0116'), ]", "name='serial', field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='ieps',", "name='alias', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='ieps', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='iva',", "), migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0), ), migrations.AlterField( 
model_name='ticketproducts',", "field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='quantity', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='total', field=models.FloatField(default=0),", "name='ieps', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0), ),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('SV', '0005_auto_20190305_0116'), ] operations", "field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0), ), migrations.AlterField(", "model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts', name='ieps', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts',", "migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField(", "# Generated by Django 2.0.5 on 2019-03-10 19:46 from django.db import migrations, models", "= [ ('SV', '0005_auto_20190305_0116'), ] operations = [ migrations.RemoveField( model_name='cut', name='user', ), migrations.AlterField(", "models class Migration(migrations.Migration): dependencies = [ ('SV', '0005_auto_20190305_0116'), ] operations = [ migrations.RemoveField(", "model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ), 
migrations.AlterField( model_name='ticketproducts', name='price', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='productName', field=models.CharField(blank=True,", "null=True), ), migrations.AlterField( model_name='ticketproducts', name='ieps', field=models.FloatField(default=0), ), migrations.AlterField( model_name='ticketproducts', name='iva', field=models.FloatField(default=0), ), migrations.AlterField(", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('SV', '0005_auto_20190305_0116'), ] operations =", "migrations.RemoveField( model_name='cut', name='user', ), migrations.AlterField( model_name='cut', name='serial', field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True,", "[ ('SV', '0005_auto_20190305_0116'), ] operations = [ migrations.RemoveField( model_name='cut', name='user', ), migrations.AlterField( model_name='cut',", "2019-03-10 19:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('SV',", "model_name='cut', name='serial', field=models.IntegerField(default=1), ), migrations.AlterField( model_name='ticketproducts', name='alias', field=models.CharField(blank=True, max_length=250, null=True), ), migrations.AlterField( model_name='ticketproducts'," ]
[ "template, data=None): if data is None: data = {} t = self.loader.load( template", "= cStringIO.StringIO() zfile = gzip.GzipFile(None, 'wb', 9, zbuf) if isinstance( content, unicode ):", "TemplateLoader import os import cStringIO, gzip class TemplateResource(Resource): isLeaf = True def __init__(self,", "zfile = gzip.GzipFile(None, 'wb', 9, zbuf) if isinstance( content, unicode ): zfile.write( unicode(content).encode(\"utf-8\")", "zfile.write( unicode(content).encode(\"utf-8\") ) elif isinstance( content, str ): zfile.write( unicode(content, 'utf-8' ).encode(\"utf-8\") )", "import os import cStringIO, gzip class TemplateResource(Resource): isLeaf = True def __init__(self, path", "= gzip.GzipFile(None, 'wb', 9, zbuf) if isinstance( content, unicode ): zfile.write( unicode(content).encode(\"utf-8\") )", ") else: zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else: return content def", "path loader = TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ] , auto_reload=True ) def render_GET(self,", "cStringIO.StringIO() zfile = gzip.GzipFile(None, 'wb', 9, zbuf) if isinstance( content, unicode ): zfile.write(", "= self._render_template( self.path.replace(\"docs/\", \"\") + \".genshi\" ) else: content = self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\")", "= self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") + \".genshi\" ) content = content.replace(\"\\t\", \"\") encoding =", "zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else: return content def _render_template(self, template, data=None): if data", ") zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else: return content def _render_template(self, template, data=None): if", ") else: content = self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") + 
\".genshi\" ) content = content.replace(\"\\t\",", "\"\") + \".genshi\" ) else: content = self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") + \".genshi\" )", "content = content.replace(\"\\t\", \"\") encoding = request.getHeader(\"accept-encoding\") if encoding and \"gzip\" in encoding:", "content.replace(\"\\t\", \"\") encoding = request.getHeader(\"accept-encoding\") if encoding and \"gzip\" in encoding: zbuf =", "zbuf) if isinstance( content, unicode ): zfile.write( unicode(content).encode(\"utf-8\") ) elif isinstance( content, str", "import Resource from genshi.template import TemplateLoader import os import cStringIO, gzip class TemplateResource(Resource):", "= TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ] , auto_reload=True ) def render_GET(self, request): if", "unicode(content, 'utf-8' ).encode(\"utf-8\") ) else: zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else:", "self.path is not None: content = self._render_template( self.path.replace(\"docs/\", \"\") + \".genshi\" ) else:", "else: return content def _render_template(self, template, data=None): if data is None: data =", "__init__(self, path = None): self.path = path loader = TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates')", "= None): self.path = path loader = TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ] ,", "data = {} t = self.loader.load( template ) return t.generate( data=data ).render('xhtml', doctype='xhtml')", "unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else: return content def _render_template(self, template, data=None):", "\"gzip\" in encoding: zbuf = cStringIO.StringIO() zfile = gzip.GzipFile(None, 'wb', 9, zbuf) if", "self._render_template( self.path.replace(\"docs/\", 
\"\") + \".genshi\" ) else: content = self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") +", "auto_reload=True ) def render_GET(self, request): if self.path is not None: content = self._render_template(", "def _render_template(self, template, data=None): if data is None: data = {} t =", "from genshi.template import TemplateLoader import os import cStringIO, gzip class TemplateResource(Resource): isLeaf =", "content, str ): zfile.write( unicode(content, 'utf-8' ).encode(\"utf-8\") ) else: zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close()", "isinstance( content, str ): zfile.write( unicode(content, 'utf-8' ).encode(\"utf-8\") ) else: zfile.write( unicode(content).encode(\"utf-8\") )", "content, unicode ): zfile.write( unicode(content).encode(\"utf-8\") ) elif isinstance( content, str ): zfile.write( unicode(content,", "self.path = path loader = TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ] , auto_reload=True )", "\"\").strip(\"/\") + \".genshi\" ) content = content.replace(\"\\t\", \"\") encoding = request.getHeader(\"accept-encoding\") if encoding", "if encoding and \"gzip\" in encoding: zbuf = cStringIO.StringIO() zfile = gzip.GzipFile(None, 'wb',", "TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ] , auto_reload=True ) def render_GET(self, request): if self.path", "None: content = self._render_template( self.path.replace(\"docs/\", \"\") + \".genshi\" ) else: content = self._render_template(", "9, zbuf) if isinstance( content, unicode ): zfile.write( unicode(content).encode(\"utf-8\") ) elif isinstance( content,", "if self.path is not None: content = self._render_template( self.path.replace(\"docs/\", \"\") + \".genshi\" )", "import cStringIO, gzip class TemplateResource(Resource): isLeaf = True def __init__(self, path = None):", "if data is None: data = {} t = self.loader.load( template ) return", "os.path.join(os.path.dirname(__file__), 
'../web_templates') ] , auto_reload=True ) def render_GET(self, request): if self.path is not", "'utf-8' ).encode(\"utf-8\") ) else: zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else: return", "): zfile.write( unicode(content).encode(\"utf-8\") ) elif isinstance( content, str ): zfile.write( unicode(content, 'utf-8' ).encode(\"utf-8\")", "\"\") encoding = request.getHeader(\"accept-encoding\") if encoding and \"gzip\" in encoding: zbuf = cStringIO.StringIO()", "def __init__(self, path = None): self.path = path loader = TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__),", ") content = content.replace(\"\\t\", \"\") encoding = request.getHeader(\"accept-encoding\") if encoding and \"gzip\" in", "True def __init__(self, path = None): self.path = path loader = TemplateLoader( search_path=[", "return content def _render_template(self, template, data=None): if data is None: data = {}", "str ): zfile.write( unicode(content, 'utf-8' ).encode(\"utf-8\") ) else: zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\")", "search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ] , auto_reload=True ) def render_GET(self, request): if self.path is", "os import cStringIO, gzip class TemplateResource(Resource): isLeaf = True def __init__(self, path =", "class TemplateResource(Resource): isLeaf = True def __init__(self, path = None): self.path = path", "gzip class TemplateResource(Resource): isLeaf = True def __init__(self, path = None): self.path =", "zfile.write( unicode(content, 'utf-8' ).encode(\"utf-8\") ) else: zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue()", "request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else: return content def _render_template(self, template, data=None): if data 
is", "self.path.replace(\"docs/\", \"\") + \".genshi\" ) else: content = self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") + \".genshi\"", "= content.replace(\"\\t\", \"\") encoding = request.getHeader(\"accept-encoding\") if encoding and \"gzip\" in encoding: zbuf", "else: zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else: return content def _render_template(self,", "not None: content = self._render_template( self.path.replace(\"docs/\", \"\") + \".genshi\" ) else: content =", "+ \".genshi\" ) else: content = self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") + \".genshi\" ) content", "unicode ): zfile.write( unicode(content).encode(\"utf-8\") ) elif isinstance( content, str ): zfile.write( unicode(content, 'utf-8'", "import TemplateLoader import os import cStringIO, gzip class TemplateResource(Resource): isLeaf = True def", "zbuf.getvalue() else: return content def _render_template(self, template, data=None): if data is None: data", ").encode(\"utf-8\") ) else: zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else: return content", "request): if self.path is not None: content = self._render_template( self.path.replace(\"docs/\", \"\") + \".genshi\"", "path = None): self.path = path loader = TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ]", "from twisted.web.resource import Resource from genshi.template import TemplateLoader import os import cStringIO, gzip", "= True def __init__(self, path = None): self.path = path loader = TemplateLoader(", "request.path.replace(\"docs/\", \"\").strip(\"/\") + \".genshi\" ) content = content.replace(\"\\t\", \"\") encoding = request.getHeader(\"accept-encoding\") if", "data is None: data = {} t = self.loader.load( template ) return t.generate(", "): zfile.write( 
unicode(content, 'utf-8' ).encode(\"utf-8\") ) else: zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return", "is not None: content = self._render_template( self.path.replace(\"docs/\", \"\") + \".genshi\" ) else: content", "zbuf = cStringIO.StringIO() zfile = gzip.GzipFile(None, 'wb', 9, zbuf) if isinstance( content, unicode", "elif isinstance( content, str ): zfile.write( unicode(content, 'utf-8' ).encode(\"utf-8\") ) else: zfile.write( unicode(content).encode(\"utf-8\")", "'wb', 9, zbuf) if isinstance( content, unicode ): zfile.write( unicode(content).encode(\"utf-8\") ) elif isinstance(", "loader = TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ] , auto_reload=True ) def render_GET(self, request):", "render_GET(self, request): if self.path is not None: content = self._render_template( self.path.replace(\"docs/\", \"\") +", "None): self.path = path loader = TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ] , auto_reload=True", "twisted.web.resource import Resource from genshi.template import TemplateLoader import os import cStringIO, gzip class", "encoding = request.getHeader(\"accept-encoding\") if encoding and \"gzip\" in encoding: zbuf = cStringIO.StringIO() zfile", "def render_GET(self, request): if self.path is not None: content = self._render_template( self.path.replace(\"docs/\", \"\")", "'../web_templates') ] , auto_reload=True ) def render_GET(self, request): if self.path is not None:", "return zbuf.getvalue() else: return content def _render_template(self, template, data=None): if data is None:", "is None: data = {} t = self.loader.load( template ) return t.generate( data=data", "gzip.GzipFile(None, 'wb', 9, zbuf) if isinstance( content, unicode ): zfile.write( unicode(content).encode(\"utf-8\") ) elif", "content def _render_template(self, template, data=None): if data is None: data = {} t", 
"_render_template(self, template, data=None): if data is None: data = {} t = self.loader.load(", "self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") + \".genshi\" ) content = content.replace(\"\\t\", \"\") encoding = request.getHeader(\"accept-encoding\")", "content = self._render_template( self.path.replace(\"docs/\", \"\") + \".genshi\" ) else: content = self._render_template( request.path.replace(\"docs/\",", "content = self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") + \".genshi\" ) content = content.replace(\"\\t\", \"\") encoding", ") def render_GET(self, request): if self.path is not None: content = self._render_template( self.path.replace(\"docs/\",", "encoding: zbuf = cStringIO.StringIO() zfile = gzip.GzipFile(None, 'wb', 9, zbuf) if isinstance( content,", "= path loader = TemplateLoader( search_path=[ os.path.join(os.path.dirname(__file__), '../web_templates') ] , auto_reload=True ) def", "data=None): if data is None: data = {} t = self.loader.load( template )", "\".genshi\" ) content = content.replace(\"\\t\", \"\") encoding = request.getHeader(\"accept-encoding\") if encoding and \"gzip\"", "None: data = {} t = self.loader.load( template ) return t.generate( data=data ).render('xhtml',", "+ \".genshi\" ) content = content.replace(\"\\t\", \"\") encoding = request.getHeader(\"accept-encoding\") if encoding and", "cStringIO, gzip class TemplateResource(Resource): isLeaf = True def __init__(self, path = None): self.path", "= request.getHeader(\"accept-encoding\") if encoding and \"gzip\" in encoding: zbuf = cStringIO.StringIO() zfile =", "TemplateResource(Resource): isLeaf = True def __init__(self, path = None): self.path = path loader", "genshi.template import TemplateLoader import os import cStringIO, gzip class TemplateResource(Resource): isLeaf = True", "encoding and \"gzip\" in encoding: zbuf = cStringIO.StringIO() zfile = gzip.GzipFile(None, 'wb', 9,", "isinstance( content, unicode ): zfile.write( 
unicode(content).encode(\"utf-8\") ) elif isinstance( content, str ): zfile.write(", "unicode(content).encode(\"utf-8\") ) elif isinstance( content, str ): zfile.write( unicode(content, 'utf-8' ).encode(\"utf-8\") ) else:", "] , auto_reload=True ) def render_GET(self, request): if self.path is not None: content", "Resource from genshi.template import TemplateLoader import os import cStringIO, gzip class TemplateResource(Resource): isLeaf", "in encoding: zbuf = cStringIO.StringIO() zfile = gzip.GzipFile(None, 'wb', 9, zbuf) if isinstance(", "\".genshi\" ) else: content = self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") + \".genshi\" ) content =", "else: content = self._render_template( request.path.replace(\"docs/\", \"\").strip(\"/\") + \".genshi\" ) content = content.replace(\"\\t\", \"\")", "and \"gzip\" in encoding: zbuf = cStringIO.StringIO() zfile = gzip.GzipFile(None, 'wb', 9, zbuf)", "zfile.write( unicode(content).encode(\"utf-8\") ) zfile.close() request.setHeader(\"Content-encoding\",\"gzip\") return zbuf.getvalue() else: return content def _render_template(self, template,", "isLeaf = True def __init__(self, path = None): self.path = path loader =", ") elif isinstance( content, str ): zfile.write( unicode(content, 'utf-8' ).encode(\"utf-8\") ) else: zfile.write(", "request.getHeader(\"accept-encoding\") if encoding and \"gzip\" in encoding: zbuf = cStringIO.StringIO() zfile = gzip.GzipFile(None,", ", auto_reload=True ) def render_GET(self, request): if self.path is not None: content =", "if isinstance( content, unicode ): zfile.write( unicode(content).encode(\"utf-8\") ) elif isinstance( content, str ):" ]
[ "cur_num_index += 1 # print(res) return res # Method II: class Solution: def", "= 0 while index <= len(nums2)-1: if nums2[index] == num: cur_num_index = index", "== len(nums2)-1: res.append(-1) break else: cur_num_index += 1 # print(res) return res #", "my_dict = {} my_stack.append(nums2[0]) for num in nums2[1:]: while my_stack: if num >", "nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: if not nums1: return [] res", "return res # Method II: class Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int])", "len(nums2)-1: if nums2[cur_num_index] > num: res.append(nums2[cur_num_index]) break elif cur_num_index == len(nums2)-1: res.append(-1) break", "= [] my_stack = [] my_dict = {} my_stack.append(nums2[0]) for num in nums2[1:]:", "<= len(nums2)-1: if nums2[index] == num: cur_num_index = index break else: index +=", "index += 1 # print(cur_num_index) while cur_num_index <= len(nums2)-1: if nums2[cur_num_index] > num:", "break else: cur_num_index += 1 # print(res) return res # Method II: class", "break elif cur_num_index == len(nums2)-1: res.append(-1) break else: cur_num_index += 1 # print(res)", "my_stack: if num > my_stack[-1]: my_dict[my_stack.pop()] = num else: break my_stack.append(num) for key", "# print(res) return res # Method II: class Solution: def nextGreaterElement(self, nums1: List[int],", "elif cur_num_index == len(nums2)-1: res.append(-1) break else: cur_num_index += 1 # print(res) return", "res.append(nums2[cur_num_index]) break elif cur_num_index == len(nums2)-1: res.append(-1) break else: cur_num_index += 1 #", "print(cur_num_index) while cur_num_index <= len(nums2)-1: if nums2[cur_num_index] > num: res.append(nums2[cur_num_index]) break elif cur_num_index", "if nums2[index] == num: cur_num_index = index break else: index += 1 #", "Method I: class Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: res", "res = [] my_stack = [] my_dict = {} my_stack.append(nums2[0]) 
for num in", "num else: break my_stack.append(num) for key in my_stack: my_dict[key] = -1 for i", "index = 0 cur_num_index = 0 while index <= len(nums2)-1: if nums2[index] ==", "key in my_stack: my_dict[key] = -1 for i in nums1: res.append(my_dict[i]) return res", "my_stack.append(nums2[0]) for num in nums2[1:]: while my_stack: if num > my_stack[-1]: my_dict[my_stack.pop()] =", "class Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: if not nums1:", "res # Method II: class Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) ->", "print(res) return res # Method II: class Solution: def nextGreaterElement(self, nums1: List[int], nums2:", "1 # print(cur_num_index) while cur_num_index <= len(nums2)-1: if nums2[cur_num_index] > num: res.append(nums2[cur_num_index]) break", "1 # print(res) return res # Method II: class Solution: def nextGreaterElement(self, nums1:", "nums1: index = 0 cur_num_index = 0 while index <= len(nums2)-1: if nums2[index]", "{} my_stack.append(nums2[0]) for num in nums2[1:]: while my_stack: if num > my_stack[-1]: my_dict[my_stack.pop()]", "# Method II: class Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:", "else: index += 1 # print(cur_num_index) while cur_num_index <= len(nums2)-1: if nums2[cur_num_index] >", "len(nums2)-1: res.append(-1) break else: cur_num_index += 1 # print(res) return res # Method", "if nums2[cur_num_index] > num: res.append(nums2[cur_num_index]) break elif cur_num_index == len(nums2)-1: res.append(-1) break else:", "index <= len(nums2)-1: if nums2[index] == num: cur_num_index = index break else: index", "<= len(nums2)-1: if nums2[cur_num_index] > num: res.append(nums2[cur_num_index]) break elif cur_num_index == len(nums2)-1: res.append(-1)", "num > my_stack[-1]: my_dict[my_stack.pop()] = num else: break my_stack.append(num) for key in my_stack:", "I: class Solution: def nextGreaterElement(self, nums1: List[int], nums2: 
List[int]) -> List[int]: res =", "len(nums2)-1: if nums2[index] == num: cur_num_index = index break else: index += 1", "else: break my_stack.append(num) for key in my_stack: my_dict[key] = -1 for i in", "cur_num_index == len(nums2)-1: res.append(-1) break else: cur_num_index += 1 # print(res) return res", "List[int]) -> List[int]: res = [] for num in nums1: index = 0", "nums1: List[int], nums2: List[int]) -> List[int]: res = [] for num in nums1:", "res = [] for num in nums1: index = 0 cur_num_index = 0", "cur_num_index = 0 while index <= len(nums2)-1: if nums2[index] == num: cur_num_index =", "num in nums1: index = 0 cur_num_index = 0 while index <= len(nums2)-1:", "my_dict[my_stack.pop()] = num else: break my_stack.append(num) for key in my_stack: my_dict[key] = -1", "# print(cur_num_index) while cur_num_index <= len(nums2)-1: if nums2[cur_num_index] > num: res.append(nums2[cur_num_index]) break elif", "-> List[int]: if not nums1: return [] res = [] my_stack = []", "if num > my_stack[-1]: my_dict[my_stack.pop()] = num else: break my_stack.append(num) for key in", "nums2: List[int]) -> List[int]: if not nums1: return [] res = [] my_stack", "return [] res = [] my_stack = [] my_dict = {} my_stack.append(nums2[0]) for", "== num: cur_num_index = index break else: index += 1 # print(cur_num_index) while", "my_stack[-1]: my_dict[my_stack.pop()] = num else: break my_stack.append(num) for key in my_stack: my_dict[key] =", "def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: if not nums1: return []", "num: res.append(nums2[cur_num_index]) break elif cur_num_index == len(nums2)-1: res.append(-1) break else: cur_num_index += 1", "break my_stack.append(num) for key in my_stack: my_dict[key] = -1 for i in nums1:", "[] for num in nums1: index = 0 cur_num_index = 0 while index", "[] res = [] my_stack = [] my_dict = {} my_stack.append(nums2[0]) for num", "while index <= len(nums2)-1: if nums2[index] == num: cur_num_index = index break else:", "for key in 
my_stack: my_dict[key] = -1 for i in nums1: res.append(my_dict[i]) return", "Method II: class Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: if", "= 0 cur_num_index = 0 while index <= len(nums2)-1: if nums2[index] == num:", "cur_num_index = index break else: index += 1 # print(cur_num_index) while cur_num_index <=", "num in nums2[1:]: while my_stack: if num > my_stack[-1]: my_dict[my_stack.pop()] = num else:", "num: cur_num_index = index break else: index += 1 # print(cur_num_index) while cur_num_index", "else: cur_num_index += 1 # print(res) return res # Method II: class Solution:", "def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: res = [] for num", "List[int], nums2: List[int]) -> List[int]: res = [] for num in nums1: index", "List[int]: if not nums1: return [] res = [] my_stack = [] my_dict", "index break else: index += 1 # print(cur_num_index) while cur_num_index <= len(nums2)-1: if", "cur_num_index <= len(nums2)-1: if nums2[cur_num_index] > num: res.append(nums2[cur_num_index]) break elif cur_num_index == len(nums2)-1:", "my_stack = [] my_dict = {} my_stack.append(nums2[0]) for num in nums2[1:]: while my_stack:", "Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: res = [] for", "+= 1 # print(res) return res # Method II: class Solution: def nextGreaterElement(self,", "= index break else: index += 1 # print(cur_num_index) while cur_num_index <= len(nums2)-1:", "nums2: List[int]) -> List[int]: res = [] for num in nums1: index =", "for num in nums2[1:]: while my_stack: if num > my_stack[-1]: my_dict[my_stack.pop()] = num", "nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: res = [] for num in", "in nums2[1:]: while my_stack: if num > my_stack[-1]: my_dict[my_stack.pop()] = num else: break", "List[int]: res = [] for num in nums1: index = 0 cur_num_index =", "+= 1 # print(cur_num_index) while cur_num_index <= len(nums2)-1: if 
nums2[cur_num_index] > num: res.append(nums2[cur_num_index])", "List[int]) -> List[int]: if not nums1: return [] res = [] my_stack =", "while my_stack: if num > my_stack[-1]: my_dict[my_stack.pop()] = num else: break my_stack.append(num) for", "[] my_stack = [] my_dict = {} my_stack.append(nums2[0]) for num in nums2[1:]: while", "0 cur_num_index = 0 while index <= len(nums2)-1: if nums2[index] == num: cur_num_index", "in nums1: index = 0 cur_num_index = 0 while index <= len(nums2)-1: if", "res.append(-1) break else: cur_num_index += 1 # print(res) return res # Method II:", "not nums1: return [] res = [] my_stack = [] my_dict = {}", "my_stack.append(num) for key in my_stack: my_dict[key] = -1 for i in nums1: res.append(my_dict[i])", "= [] for num in nums1: index = 0 cur_num_index = 0 while", "Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: if not nums1: return", "> my_stack[-1]: my_dict[my_stack.pop()] = num else: break my_stack.append(num) for key in my_stack: my_dict[key]", "List[int], nums2: List[int]) -> List[int]: if not nums1: return [] res = []", "0 while index <= len(nums2)-1: if nums2[index] == num: cur_num_index = index break", "[] my_dict = {} my_stack.append(nums2[0]) for num in nums2[1:]: while my_stack: if num", "class Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: res = []", "# Method I: class Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:", "while cur_num_index <= len(nums2)-1: if nums2[cur_num_index] > num: res.append(nums2[cur_num_index]) break elif cur_num_index ==", "nums1: return [] res = [] my_stack = [] my_dict = {} my_stack.append(nums2[0])", "= num else: break my_stack.append(num) for key in my_stack: my_dict[key] = -1 for", "for num in nums1: index = 0 cur_num_index = 0 while index <=", "-> List[int]: res = [] for num in nums1: index = 0 cur_num_index", "nums2[1:]: while my_stack: if num > my_stack[-1]: 
my_dict[my_stack.pop()] = num else: break my_stack.append(num)", "nums2[index] == num: cur_num_index = index break else: index += 1 # print(cur_num_index)", "= [] my_dict = {} my_stack.append(nums2[0]) for num in nums2[1:]: while my_stack: if", "nums1: List[int], nums2: List[int]) -> List[int]: if not nums1: return [] res =", "> num: res.append(nums2[cur_num_index]) break elif cur_num_index == len(nums2)-1: res.append(-1) break else: cur_num_index +=", "nums2[cur_num_index] > num: res.append(nums2[cur_num_index]) break elif cur_num_index == len(nums2)-1: res.append(-1) break else: cur_num_index", "II: class Solution: def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]: if not", "break else: index += 1 # print(cur_num_index) while cur_num_index <= len(nums2)-1: if nums2[cur_num_index]", "= {} my_stack.append(nums2[0]) for num in nums2[1:]: while my_stack: if num > my_stack[-1]:", "if not nums1: return [] res = [] my_stack = [] my_dict =" ]
[ "pd import torch import torch.nn as nn import yaml from tqdm.auto import tqdm", "import numpy as np import pandas as pd import torch import torch.nn as", "<gh_stars>1-10 import os import torch import numpy as np import pandas as pd", "pandas as pd import torch import torch.nn as nn import yaml from tqdm.auto", "import torch.nn as nn import yaml from tqdm.auto import tqdm from tensorboardX import", "as np import pandas as pd import torch import torch.nn as nn import", "torch.nn as nn import yaml from tqdm.auto import tqdm from tensorboardX import SummaryWriter", "as nn import yaml from tqdm.auto import tqdm from tensorboardX import SummaryWriter from", "os import torch import numpy as np import pandas as pd import torch", "import pandas as pd import torch import torch.nn as nn import yaml from", "numpy as np import pandas as pd import torch import torch.nn as nn", "nn import yaml from tqdm.auto import tqdm from tensorboardX import SummaryWriter from torch.utils.data.dataloader", "as pd import torch import torch.nn as nn import yaml from tqdm.auto import", "yaml from tqdm.auto import tqdm from tensorboardX import SummaryWriter from torch.utils.data.dataloader import DataLoader", "import torch import numpy as np import pandas as pd import torch import", "import os import torch import numpy as np import pandas as pd import", "import yaml from tqdm.auto import tqdm from tensorboardX import SummaryWriter from torch.utils.data.dataloader import", "torch import torch.nn as nn import yaml from tqdm.auto import tqdm from tensorboardX", "torch import numpy as np import pandas as pd import torch import torch.nn", "np import pandas as pd import torch import torch.nn as nn import yaml", "import torch import torch.nn as nn import yaml from tqdm.auto import tqdm from" ]
[ "the source files # import os import sys from syscmd import syscmd from", "and distribution is # subject to the Boost Software License, Version 1.0. (See", "if 0: for s in sources: syscmd('boosthtml %s' % s) else: extensions =", "('html', 'pdf') if len(sys.argv) > 1: extensions = sys.argv[1:] all = [ '%s.%s'", "from sources import sources if 0: for s in sources: syscmd('boosthtml %s' %", "ext in extensions for s in sources ] print 'make %s' % '", "Copyright <NAME> 2004. Use, modification and distribution is # subject to the Boost", "# Copyright <NAME> 2004. Use, modification and distribution is # subject to the", "html, TeX, and PDF versions of all the source files # import os", "Use, modification and distribution is # subject to the Boost Software License, Version", "len(sys.argv) > 1: extensions = sys.argv[1:] all = [ '%s.%s' % (os.path.splitext(s)[0],ext) for", "syscmd from sources import sources if 0: for s in sources: syscmd('boosthtml %s'", "'%s.%s' % (os.path.splitext(s)[0],ext) for ext in extensions for s in sources ] print", "1: extensions = sys.argv[1:] all = [ '%s.%s' % (os.path.splitext(s)[0],ext) for ext in", "Boost Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at", "all the source files # import os import sys from syscmd import syscmd", "of all the source files # import os import sys from syscmd import", "TeX, and PDF versions of all the source files # import os import", "distribution is # subject to the Boost Software License, Version 1.0. (See accompanying", "at http://www.boost.org/LICENSE_1_0.txt) # # Generate html, TeX, and PDF versions of all the", "sources: syscmd('boosthtml %s' % s) else: extensions = ('html', 'pdf') if len(sys.argv) >", "sources ] print 'make %s' % ' '.join(all) syscmd('make %s' % ' '.join(all))", "License, Version 1.0. 
(See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #", "if len(sys.argv) > 1: extensions = sys.argv[1:] all = [ '%s.%s' % (os.path.splitext(s)[0],ext)", "is # subject to the Boost Software License, Version 1.0. (See accompanying #", "LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # # Generate html, TeX, and PDF versions", "import sys from syscmd import syscmd from sources import sources if 0: for", "the Boost Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy", "= ('html', 'pdf') if len(sys.argv) > 1: extensions = sys.argv[1:] all = [", "#!/usr/bin/python # Copyright <NAME> 2004. Use, modification and distribution is # subject to", "modification and distribution is # subject to the Boost Software License, Version 1.0.", "import os import sys from syscmd import syscmd from sources import sources if", "in sources ] print 'make %s' % ' '.join(all) syscmd('make %s' % '", "<NAME> 2004. Use, modification and distribution is # subject to the Boost Software", "(See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # # Generate html,", "for s in sources: syscmd('boosthtml %s' % s) else: extensions = ('html', 'pdf')", "for ext in extensions for s in sources ] print 'make %s' %", "source files # import os import sys from syscmd import syscmd from sources", "Version 1.0. 
(See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # #", "files # import os import sys from syscmd import syscmd from sources import", "copy at http://www.boost.org/LICENSE_1_0.txt) # # Generate html, TeX, and PDF versions of all", "all = [ '%s.%s' % (os.path.splitext(s)[0],ext) for ext in extensions for s in", "= [ '%s.%s' % (os.path.splitext(s)[0],ext) for ext in extensions for s in sources", "and PDF versions of all the source files # import os import sys", "(os.path.splitext(s)[0],ext) for ext in extensions for s in sources ] print 'make %s'", "% s) else: extensions = ('html', 'pdf') if len(sys.argv) > 1: extensions =", "[ '%s.%s' % (os.path.splitext(s)[0],ext) for ext in extensions for s in sources ]", "import syscmd from sources import sources if 0: for s in sources: syscmd('boosthtml", "# Generate html, TeX, and PDF versions of all the source files #", "extensions = sys.argv[1:] all = [ '%s.%s' % (os.path.splitext(s)[0],ext) for ext in extensions", "s) else: extensions = ('html', 'pdf') if len(sys.argv) > 1: extensions = sys.argv[1:]", "2004. Use, modification and distribution is # subject to the Boost Software License,", "# subject to the Boost Software License, Version 1.0. (See accompanying # file", "syscmd('boosthtml %s' % s) else: extensions = ('html', 'pdf') if len(sys.argv) > 1:", "1.0. 
(See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # # Generate", "%s' % s) else: extensions = ('html', 'pdf') if len(sys.argv) > 1: extensions", "extensions for s in sources ] print 'make %s' % ' '.join(all) syscmd('make", "0: for s in sources: syscmd('boosthtml %s' % s) else: extensions = ('html',", "% (os.path.splitext(s)[0],ext) for ext in extensions for s in sources ] print 'make", "# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # # Generate html, TeX, and", "extensions = ('html', 'pdf') if len(sys.argv) > 1: extensions = sys.argv[1:] all =", "sources if 0: for s in sources: syscmd('boosthtml %s' % s) else: extensions", "<reponame>randolphwong/mcsema<filename>boost/libs/iterator/doc/generate.py #!/usr/bin/python # Copyright <NAME> 2004. Use, modification and distribution is # subject", "'pdf') if len(sys.argv) > 1: extensions = sys.argv[1:] all = [ '%s.%s' %", "# import os import sys from syscmd import syscmd from sources import sources", "http://www.boost.org/LICENSE_1_0.txt) # # Generate html, TeX, and PDF versions of all the source", "os import sys from syscmd import syscmd from sources import sources if 0:", "subject to the Boost Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt", "to the Boost Software License, Version 1.0. 
(See accompanying # file LICENSE_1_0.txt or", "= sys.argv[1:] all = [ '%s.%s' % (os.path.splitext(s)[0],ext) for ext in extensions for", "s in sources ] print 'make %s' % ' '.join(all) syscmd('make %s' %", "in extensions for s in sources ] print 'make %s' % ' '.join(all)", "from syscmd import syscmd from sources import sources if 0: for s in", "Generate html, TeX, and PDF versions of all the source files # import", "for s in sources ] print 'make %s' % ' '.join(all) syscmd('make %s'", "s in sources: syscmd('boosthtml %s' % s) else: extensions = ('html', 'pdf') if", "sources import sources if 0: for s in sources: syscmd('boosthtml %s' % s)", "file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # # Generate html, TeX, and PDF", "else: extensions = ('html', 'pdf') if len(sys.argv) > 1: extensions = sys.argv[1:] all", "syscmd import syscmd from sources import sources if 0: for s in sources:", "PDF versions of all the source files # import os import sys from", "or copy at http://www.boost.org/LICENSE_1_0.txt) # # Generate html, TeX, and PDF versions of", "import sources if 0: for s in sources: syscmd('boosthtml %s' % s) else:", "# # Generate html, TeX, and PDF versions of all the source files", "sys from syscmd import syscmd from sources import sources if 0: for s", "accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # # Generate html, TeX,", "in sources: syscmd('boosthtml %s' % s) else: extensions = ('html', 'pdf') if len(sys.argv)", "Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)", "> 1: extensions = sys.argv[1:] all = [ '%s.%s' % (os.path.splitext(s)[0],ext) for ext", "sys.argv[1:] all = [ '%s.%s' % (os.path.splitext(s)[0],ext) for ext in extensions for s", "versions of all the source files # import os import sys from syscmd" ]
[ "combined_feature_map_shape = image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] *", "image tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is", "result['detection_scores'] = result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir) IMG_PATH = os.path.join(img_dir, file_list[0])", "print(\"classes_1:\", classes_1) classes_2 = np.ones(shape=raw_shape - num_detections) classes = np.hstack((classes_1, classes_2)) classes =", "for key, value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\" not in", "key, value in result_middle.items(): if str(i) + BASE_BoxEncodingPredictor in key: print(str(i) + BASE_BoxEncodingPredictor", "get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of spatial dimensions for each feature map in a list.", "= load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE", "feature_map in feature_maps ] return [(shape[1], shape[2]) for shape in feature_map_shapes] def post_processor(boxes_encodings,", "Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size]", "present in the input `box_encodings`, None otherwise. 
\"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size", "print(str(i) + BASE_BoxEncodingPredictor + \": \", value.shape) boxes_encodings_np.append(value) break if i == 0:", "line_thickness=8) # IMAGE_SIZE = (12, 8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image): (im_width,", "= shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1) padded_height = float(resized_inputs_shape[1])", "feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps ] return [(shape[1], shape[2])", "= np.zeros(shape=(1, raw_shape - num_detections, 4)) boxes = np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] = boxes", "in the input `box_encodings`, None otherwise. \"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size =", "raw_shape - num_detections, 4)) boxes = np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections] =", "in key: print(\"key {} value {}\".format(key, value.shape)) feature_maps_np.append(value) if len(feature_maps_np) < 1: key_dict", "image = Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index, instance_masks=result.get('detection_masks'),", "('box_encodings' not in prediction_dict or 'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict does not", "BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in key: print(str(i) + BASE_ClassPredictor+ \": \",", "prediction_dict): raise ValueError('prediction_dict does not contain expected entries.') preprocessed_images = 
prediction_dict['preprocessed_inputs'] box_encodings =", "= box_list_ops.concatenate(anchors_list) box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim == 4 and box_encodings.shape[2] ==", "num_anchors, 4] containing the decoded boxes. decoded_keypoints: A float32 tensor of shape [batch_size,", "shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps ] return [(shape[1], shape[2]) for shape in", "tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of", "= prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings) detection_boxes = detection_boxes detection_boxes = np.expand_dims(detection_boxes,", "use_display_name=True) category_index = label_map_util.create_category_index(categories) result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores'] =", "function takes an input batch of images and runs it through the forward", "corresponds to a feature map in the input `image_features` list. 
\"\"\" box_encodings_list =", "def post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------ post_processor ------------------\") \"\"\"Computes encoded object locations and", "number of box predictions to be made per spatial location for each feature", "str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in key: print(str(i) + BASE_ClassPredictor+", "shapes of true images in the resized images, as resized images can be", "None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape(", "POST processer :param boxes_encodings: :param classes_predictions_with_background: :param feature_maps: :param preprocessed_inputs: :param true_image_shapes: :return:", "value in result_middle.items(): if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in", "BASE_PPN_ClassPredictor in key: print(str(i) + BASE_ClassPredictor + \":\", value.shape) classes_predictions_with_background_np.append(value) break for key,", "3)) post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return post_result def show_detection_result(result):", "classes = postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections", "num_detections) scores = np.hstack((scores_1, scores_2)) scores = np.reshape(scores, (1, scores.shape[0])) outputs[detection_fields.detection_scores] = scores", "if ('box_encodings' not in prediction_dict or 'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict does", 
"detection_scores_with_background[0:, 0:, 1:] additional_fields = None if detection_keypoints is not None: additional_fields =", "{BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors, prediction_dict, true_image_shapes): print(\"------------------ postprocess ------------------\") if ('box_encodings'", "postprocess(anchors, prediction_dict, true_image_shapes): print(\"------------------ postprocess ------------------\") if ('box_encodings' not in prediction_dict or 'class_predictions_with_background'", "the postprocess or loss functions can be called. Args: boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs:", "outputs[detection_fields.detection_masks] = masks return outputs def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\")", "encodings. Returns: decoded_boxes: A float32 tensor of shape [batch_size, num_anchors, 4] containing the", "key, value in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]],", "as fields from platformx.plat_tensorflow.tools.processor import model_config import config from PIL import Image import", "= num_detections if keypoints is not None: outputs[detection_fields.detection_keypoints] = keypoints if masks is", "can be called. 
Args: boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs: a [batch, height, width, channels]", "1 class_predictions_with_background = np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background)", "to yield unpostprocessesed predictions. A side effect of calling the predict method is", "label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0]", "[batch_size, 1, 1]) tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes", "axis=1) cliped_imaged = cliped_image.reshape(1, -1) return cliped_imaged def _batch_decode(anchors, box_encodings): \"\"\"Decodes a batch", "not in key: print(\"key {} value {}\".format(key, value.shape)) feature_maps_np.append(value) if len(feature_maps_np) < 1:", "] return [(shape[1], shape[2]) for shape in feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list):", "use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE = (12, 8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image):", "detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images)", 
"indicating the shapes of true images in the resized images, as resized images", "scores_1 = scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2 = np.zeros(shape=raw_shape - num_detections) scores = np.hstack((scores_1,", "axis=2) class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings,", "1] representing the class predictions for the proposals. Each entry in the list", "os BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\"", "box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors, prediction_dict, true_image_shapes): print(\"------------------ postprocess ------------------\") if ('box_encodings' not", "preprocessed_images, true_image_shapes), additional_fields=additional_fields) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections:", "additional_fields = None if detection_keypoints is not None: additional_fields = { fields.BoxListFields.keypoints: detection_keypoints}", "[batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is", "num_predictions_per_location, 1, box_code_size]) box_encodings = np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots =", "input_shape = preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1, 3))", "= box_coder.decode( np.reshape(box_encodings, [-1, 
box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints =", "isinstance(num_detections, float): num_detections = int(num_detections) elif isinstance(num_detections, np.ndarray): num_detections = int(num_detections[0]) print(\"=============== num_detections", "cliped_image.reshape(1, -1) return cliped_imaged def _batch_decode(anchors, box_encodings): \"\"\"Decodes a batch of box encodings", "= image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None):", "= np.tile( np.expand_dims(anchors.get(), 0), [batch_size, 1, 1]) tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4]))", "the resized images, as resized images can be padded with zeros. \"\"\" anchor_generator", "= num_classes + 1 class_predictions_with_background = np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] *", "feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts unpostprocessed tensors from input tensor. 
This function", "detection_keypoints = _batch_decode(anchors, box_encodings) detection_boxes = detection_boxes detection_boxes = np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn", "PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,", "= anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape =", "decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes = np.reshape(decoded_boxes.get(), np.stack(", "value.shape) boxes_encodings_np.append(value) break if i == 0: if PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\", value.shape)", "= \"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor =", "constructed before the postprocess or loss functions can be called. Args: boxes_encodings: classes_predictions_with_background:", "\":\", value.shape) classes_predictions_with_background_np.append(value) break for key, value in result_middle.items(): if \"FeatureExtractor\" in key", "box_encodings = np.squeeze(box_encodings, axis=2) class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict = { 'preprocessed_inputs':", "of images. 
num_predictions_per_location_list: A list of integers representing the number of box predictions", "label_map_util.create_category_index(categories) result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0] img_dir =", "if i == 0: if PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0 + \":\", value.shape) classes_predictions_with_background_np.append(value)", "postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields", "axis=2) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:, 0:, 1:]", "feature_maps: preprocessed_inputs: a [batch, height, width, channels] image tensor. 
true_image_shapes: int32 tensor of", "boxes_2)) outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections] = num_detections if keypoints is not None: outputs[detection_fields.detection_keypoints]", "= np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes] = classes boxes_1 = boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1)", "0:, 1:] additional_fields = None if detection_keypoints is not None: additional_fields = {", "class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings) detection_boxes = detection_boxes detection_boxes =", "model_config import config from PIL import Image import matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import", "postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections)", "= config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) result['detection_classes'] = result[", "images, as resized images can be padded with zeros. \"\"\" anchor_generator = anchor_generator_builder.build()", "'anchors': anchors.get() } return predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of spatial dimensions", "A side effect of calling the predict method is that self._anchors is populated", "to the anchors. 
Args: box_encodings: A float32 tensor of shape [batch_size, num_anchors, box_code_size]", "np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes] = classes boxes_1 = boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2", "'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps, 'anchors': anchors.get() } return predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return", "scores = postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks", "the proposals. Each entry in the list corresponds to a feature map in", "[] for (image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background) in zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape =", "num_classes + 1 class_predictions_with_background = np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location,", "plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width,", "populated with a box_list.BoxList of anchors. 
These anchors must be constructed before the", "resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1) padded_height =", "num_anchors, num_keypoints, 2] containing the decoded keypoints if present in the input `box_encodings`,", "box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(), 0), [batch_size, 1, 1]) tiled_anchors_boxlist", "from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \\ anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \\ visualization_utils as", "box_list.BoxList of anchors. These anchors must be constructed before the postprocess or loss", "= post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:, 0:, 1:] additional_fields = None", "Args: boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs: a [batch, height, width, channels] image tensor. 
true_image_shapes:", "list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the", "boxes_1) boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4)) boxes = np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes]", "* num_predictions_per_location, 1, box_code_size]) box_encodings = np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots", "= int(num_detections) elif isinstance(num_detections, np.ndarray): num_detections = int(num_detections[0]) print(\"=============== num_detections :\", num_detections) outputs", "scores_1) scores_2 = np.zeros(shape=raw_shape - num_detections) scores = np.hstack((scores_1, scores_2)) scores = np.reshape(scores,", "[batch_size, num_anchors, 4] containing the decoded boxes. decoded_keypoints: A float32 tensor of shape", "im_height=image_shape[1], im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list) box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim == 4", "if present in the input `box_encodings`, None otherwise. \"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings)", "predictions. A side effect of calling the predict method is that self._anchors is", "BASE_ClassPredictor+ \": \", value.shape) classes_predictions_with_background_np.append(value) break if i == 0: if PPN_ClassPredictor_0 in", "or the number of classes. 
Each entry in the list corresponds to a", "label_map_util from scipy import misc import os BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'", "\"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np = []", "2] containing the decoded keypoints if present in the input `box_encodings`, None otherwise.", "form [height, width, channels] indicating the shapes of true images in the resized", "import numpy as np from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \\ anchor_generator_builder, box_list_ops, box_list, box_coder_builder,", "and runs it through the forward pass of the network to yield unpostprocessesed", "result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE = (12, 8) #", "a list. Args: feature_maps: a list of tensors where the ith tensor has", "images. num_predictions_per_location_list: A list of integers representing the number of box predictions to", "_add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields = fields.DetectionResultFields label_id_offset = 1 boxes", "tensor of shape [batch_size, num_anchors, 4] containing the decoded boxes. decoded_keypoints: A float32", "classes. Each entry in the list corresponds to a feature map in the", "import matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from scipy import misc import os", "the input `image_features` list. 
class_predictions_with_background: A list of float tensors of shape [batch_size,", "0 置 1 操作原始代码 if scores.shape[0] < 100: raw_shape = 100 else: raw_shape", "the predict method is that self._anchors is populated with a box_list.BoxList of anchors.", "These anchors must be constructed before the postprocess or loss functions can be", "cliped_image = np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights / padded_height, true_widths / padded_width], axis=1) cliped_imaged", "keypoints is not None: outputs[detection_fields.detection_keypoints] = keypoints if masks is not None: outputs[detection_fields.detection_masks]", "1, box_code_size]) box_encodings = np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes", "str(i) + BASE_PPN_BoxPredictor in key: print(str(i) + BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break for key,", "list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes +", "+ label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections,", "scores) scores = scores.flatten() # todo 读取配置文件 置 0 置 1 操作原始代码 if", "expected entries.') preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings = box_encodings class_predictions =", "an input batch of images and runs it through the forward pass of", "feature_maps, 'anchors': anchors.get() } return predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of spatial", "image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index, 
instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE = (12, 8)", "np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps':", "object locations and corresponding confidences. Args: image_features: A list of float tensors of", "platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \\ anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \\ visualization_utils as vis_util", "tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode( np.reshape(box_encodings,", "location for each feature map. Returns: box_encodings: A list of float tensors of", "= None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints =", "+ BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break for key, value in result_middle.items(): if str(i) +", "tensor. This function takes an input batch of images and runs it through", "label_id_offset = 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes)", "and corresponding confidences. 
Args: image_features: A list of float tensors of shape [batch_size,", "feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1,", "of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]. Returns:", "decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))", "feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS)", "or loss functions can be called. Args: boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs: a [batch,", "list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location", "if PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break else: if str(i) + BASE_PPN_BoxPredictor", "num_anchors_i, q, code_size] representing the location of the objects, where q is 1", "A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features", "ValueError('prediction_dict does not contain expected entries.') preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings", "[height, width, channels] indicating the shapes of true images in the resized images,", "number of classes. 
Each entry in the list corresponds to a feature map", "non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores:", "input batch of images and runs it through the forward pass of the", "NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) result['detection_classes']", "isinstance(num_detections, list): num_detections = num_detections[0] elif isinstance(num_detections, float): num_detections = int(num_detections) elif isinstance(num_detections,", "feature_maps_np = [] for i in range(6): for key, value in result_middle.items(): if", "操作原始代码 if scores.shape[0] < 100: raw_shape = 100 else: raw_shape = scores.shape[0] scores_1", "list. 
Args: feature_maps: a list of tensors where the ith tensor has shape", "4 and box_encodings.shape[2] == 1: box_encodings = np.squeeze(box_encodings, axis=2) class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'],", "result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\" not in key: print(\"key {} value", "def postprocess(anchors, prediction_dict, true_image_shapes): print(\"------------------ postprocess ------------------\") if ('box_encodings' not in prediction_dict or", "if (nmsed_additional_fields is not None and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints])", "'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps, 'anchors': anchors.get() } return predictions_dict, anchors def", "num_classes + 1] representing the class predictions for the proposals. Each entry in", "for the proposals. Each entry in the list corresponds to a feature map", "for each feature map. 
Returns: box_encodings: A list of float tensors of shape", "of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the", "= config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, 1, box_code_size]) box_encodings", "key, value in result_middle.items(): if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not", "image_np) def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def", "value.shape)) feature_maps_np.append(value) if len(feature_maps_np) < 1: key_dict = {} for key, value in", "float): num_detections = int(num_detections) elif isinstance(num_detections, np.ndarray): num_detections = int(num_detections[0]) print(\"=============== num_detections :\",", "tensor of shape [batch_size, num_anchors, num_keypoints, 2] containing the decoded keypoints if present", "classes_1 = classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2 = np.ones(shape=raw_shape - num_detections) classes = np.hstack((classes_1,", "= prediction_dict['box_encodings'] box_encodings = box_encodings class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings)", "= config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes + 1 class_predictions_with_background = np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1]", "q is 1 or the number of classes. 
Each entry in the list", "= postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list): num_detections = num_detections[0] elif isinstance(num_detections,", "[] for i in range(6): for key, value in result_middle.items(): if str(i) +", "return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields = fields.DetectionResultFields label_id_offset = 1", "= scores.flatten() # todo 读取配置文件 置 0 置 1 操作原始代码 if scores.shape[0] <", "the class predictions for the proposals. Each entry in the list corresponds to", "that self._anchors is populated with a box_list.BoxList of anchors. These anchors must be", "shape [batch, 3] where each row is of the form [height, width, channels]", "from input tensor. This function takes an input batch of images and runs", "8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape(", "where the ith tensor has shape [batch, height_i, width_i, depth_i]. 
Returns: a list", "box_encodings = prediction_dict['box_encodings'] box_encodings = box_encodings class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints = _batch_decode(anchors,", "= \"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor =", "scores = np.hstack((scores_1, scores_2)) scores = np.reshape(scores, (1, scores.shape[0])) outputs[detection_fields.detection_scores] = scores classes", "= value.shape[1] sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True) for key, value in", "value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\"in key: key_dict[key] = value.shape[1]", "3] where each row is of the form [height, width, channels] indicating the", "if masks is not None: outputs[detection_fields.detection_masks] = masks return outputs def last_predict_part(boxes_encodings, classes_predictions_with_background,", "decoded_boxes = box_coder.decode( np.reshape(box_encodings, [-1, box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints", "i == 0: if PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break else: if", "[] feature_maps_np = [] for i in range(6): for key, value in result_middle.items():", "tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of", "np.split(true_image_shapes, 3, axis=1) padded_height = float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2]) cliped_image = np.stack( [np.zeros_like(true_heights),", "nmsed_additional_fields, num_detections) = non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images, 
true_image_shapes), additional_fields=additional_fields) detection_dict = {", "representing the location of the objects, where q is 1 or the number", "_compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1)", "prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes) return", "from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from scipy import misc import os BOX_ENCODINGS = 'box_encodings'", "= shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps) anchors_list = anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2])", "A float32 tensor of shape [batch_size, num_anchors, num_keypoints, 2] containing the decoded keypoints", "is 1 or the number of classes. 
Each entry in the list corresponds", "{} value {}\".format(key, value.shape)) feature_maps_np.append(value) if len(feature_maps_np) < 1: key_dict = {} for", "postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list): num_detections = num_detections[0] elif isinstance(num_detections, float):", "np.zeros_like(true_widths), true_heights / padded_height, true_widths / padded_width], axis=1) cliped_imaged = cliped_image.reshape(1, -1) return", "CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0", "prediction_dict, true_image_shapes): print(\"------------------ postprocess ------------------\") if ('box_encodings' not in prediction_dict or 'class_predictions_with_background' not", "fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes", "of the form [height, width, channels] indicating the shapes of true images in", "(12, 8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image): (im_width, im_height) = image.size return", "objects, where q is 1 or the number of classes. 
Each entry in", "(image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background) in zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape = image_feature.shape box_code_size", "(nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections) = non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images, true_image_shapes),", "classes.flatten() classes_1 = classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2 = np.ones(shape=raw_shape - num_detections) classes =", "for key, value in result_middle.items(): if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor", "postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list): num_detections = num_detections[0]", "boxes outputs[detection_fields.num_detections] = num_detections if keypoints is not None: outputs[detection_fields.detection_keypoints] = keypoints if", "preprocessed_inputs: :param true_image_shapes: :return: \"\"\" prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors", "image_features, num_predictions_per_location_list): print(\"------------------ post_processor ------------------\") \"\"\"Computes encoded object locations and corresponding confidences. Args:", "def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def post_deal(boxes_encodings,", "post_processor ------------------\") \"\"\"Computes encoded object locations and corresponding confidences. 
Args: image_features: A list", "feature_maps, preprocessed_inputs) postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes", "boxes_encodings_np.append(value) break else: if str(i) + BASE_PPN_BoxPredictor in key: print(str(i) + BASE_PPN_BoxPredictor, value.shape)", "map in the input `image_features` list. class_predictions_with_background: A list of float tensors of", "combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, 1, box_code_size]) box_encodings = np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes", "sorted(key_dict.items(), key=lambda x: x[1], reverse=True) for key, value in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape =", "encodings with respect to the anchors. Args: box_encodings: A float32 tensor of shape", "box_coder_builder, post_processing_builder, \\ visualization_utils as vis_util from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields from", "score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:, 0:, 1:] additional_fields = None if detection_keypoints is not", "preprocessed_images) true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1) padded_height = float(resized_inputs_shape[1]) padded_width =", "of the objects, where q is 1 or the number of classes. 
Each", "= 'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 =", "result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir) IMG_PATH = os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH)", "classes_predictions_with_background: feature_maps: preprocessed_inputs: a [batch, height, width, channels] image tensor. true_image_shapes: int32 tensor", "anchors.get() } return predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of spatial dimensions for", "detection_fields = fields.DetectionResultFields label_id_offset = 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) classes", "for key, value in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1], input_shape[2],", "nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) } if (nmsed_additional_fields is not None", "combined_feature_map_shape[2] * num_predictions_per_location, 1, box_code_size]) box_encodings = np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES", "list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for", "of classes. 
Each entry in the list corresponds to a feature map in", "} return predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of spatial dimensions for each", "of integers representing the number of box predictions to be made per spatial", "[] class_predictions_list = [] for (image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background) in zip(image_features, num_predictions_per_location_list, boxes_encodings,", "= float(resized_inputs_shape[2]) cliped_image = np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights / padded_height, true_widths / padded_width],", "q, code_size] representing the location of the objects, where q is 1 or", "respect to the anchors. Args: box_encodings: A float32 tensor of shape [batch_size, num_anchors,", "in the input `image_features` list. \"\"\" box_encodings_list = [] class_predictions_list = [] for", "key: print(str(i) + BASE_ClassPredictor+ \": \", value.shape) classes_predictions_with_background_np.append(value) break if i == 0:", "np.expand_dims(anchors.get(), 0), [batch_size, 1, 1]) tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder =", "box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode( np.reshape(box_encodings, [-1, box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints):", "not None: outputs[detection_fields.detection_masks] = masks return outputs def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------", "postprocess(anchors, prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields = fields.DetectionResultFields", "value.shape) 
classes_predictions_with_background_np.append(value) break if i == 0: if PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0 +", "loss functions can be called. Args: boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs: a [batch, height,", "result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir) IMG_PATH = os.path.join(img_dir,", "tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for", "anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \\ visualization_utils as vis_util from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields", "1 操作原始代码 if scores.shape[0] < 100: raw_shape = 100 else: raw_shape = scores.shape[0]", "contain expected entries.') preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings = box_encodings class_predictions", "import config from PIL import Image import matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util", "# print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs)", "scores_2)) scores = np.reshape(scores, (1, scores.shape[0])) outputs[detection_fields.detection_scores] = scores classes = classes.flatten() classes_1", "decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes = np.reshape(decoded_boxes.get(), np.stack( [combined_shape[0], combined_shape[1], 4])) return", "num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings, 
classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims =", "print(\"IMG_PATH:\", IMG_PATH) image = Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'],", "anchors. Args: box_encodings: A float32 tensor of shape [batch_size, num_anchors, box_code_size] containing box", "int(num_detections[0]) print(\"=============== num_detections :\", num_detections) outputs = {} print(\"scores:\", scores) scores = scores.flatten()", "= (12, 8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image): (im_width, im_height) = image.size", "{ 'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps, 'anchors': anchors.get() } return", "list): num_detections = num_detections[0] elif isinstance(num_detections, float): num_detections = int(num_detections) elif isinstance(num_detections, np.ndarray):", "Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8)", "= boxes outputs[detection_fields.num_detections] = num_detections if keypoints is not None: outputs[detection_fields.detection_keypoints] = keypoints", "detection_keypoints is not None: additional_fields = { fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes, _,", "zeros. 
\"\"\" anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict =", "result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir) IMG_PATH", "is populated with a box_list.BoxList of anchors. These anchors must be constructed before", "input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1, 3)) post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs,", "result_middle.items(): if str(i) + BASE_BoxEncodingPredictor in key: print(str(i) + BASE_BoxEncodingPredictor + \": \",", "shape [batch_size, num_anchors, 4] containing the decoded boxes. decoded_keypoints: A float32 tensor of", "[batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. 
Each", "result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST file_list =", "true_widths, _ = np.split(true_image_shapes, 3, axis=1) padded_height = float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2]) cliped_image", "IMG_PATH = os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image = Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array(", "value.shape) boxes_encodings_np.append(value) break else: if str(i) + BASE_PPN_BoxPredictor in key: print(str(i) + BASE_PPN_BoxPredictor,", "= get_feature_map_spatial_dims( feature_maps) anchors_list = anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list) box_encodings", "classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map =", "boxes = np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections] = num_detections if keypoints is", "where q is 1 or the number of classes. 
Each entry in the", "not in key: print(str(i) + BASE_ClassPredictor+ \": \", value.shape) classes_predictions_with_background_np.append(value) break if i", "config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, 1, box_code_size]) box_encodings =", "print(\"------------------ postprocess ------------------\") if ('box_encodings' not in prediction_dict or 'class_predictions_with_background' not in prediction_dict):", "= decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes = np.reshape(decoded_boxes.get(),", "height_i, width_i, depth_i]. Returns: a list of pairs (height, width) for each feature", "padded_width = float(resized_inputs_shape[2]) cliped_image = np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights / padded_height, true_widths /", "classes_predictions_with_background_np = [] feature_maps_np = [] for i in range(6): for key, value", "the network to yield unpostprocessesed predictions. A side effect of calling the predict", "with zeros. 
\"\"\" anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict", "= result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir) IMG_PATH = os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\",", "np.ones(shape=raw_shape - num_detections) classes = np.hstack((classes_1, classes_2)) classes = np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes]", "= decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints,", "result_middle.items(): if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in key: print(str(i)", "Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i]", "last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts unpostprocessed tensors from input tensor.", "not None: additional_fields = { fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections)", "def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of spatial dimensions for each feature map in a", "BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break for key, value in result_middle.items(): if str(i) + BASE_ClassPredictor", "yield unpostprocessesed predictions. 
A side effect of calling the predict method is that", "break if i == 0: if PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break", "import model_config import config from PIL import Image import matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils", "box_encodings class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings) detection_boxes = detection_boxes detection_boxes", "in key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break else: if str(i) + BASE_PPN_BoxPredictor in key:", "np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1, 3)) post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np,", "num_keypoints, 2] containing the decoded keypoints if present in the input `box_encodings`, None", "category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE = (12, 8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np)", "i in range(6): for key, value in result_middle.items(): if str(i) + BASE_BoxEncodingPredictor in", "= 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes) +", "= np.hstack((scores_1, scores_2)) scores = np.reshape(scores, (1, scores.shape[0])) outputs[detection_fields.detection_scores] = scores classes =", "feature_maps) anchors_list = anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list) box_encodings = np.concatenate(prediction_dict['box_encodings'],", "list of tensors where 
the ith tensor has shape [batch, height_i, width_i, depth_i].", "= anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list) box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1) if", "= preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1, 3)) post_result", "anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of spatial dimensions for each feature map in", "in prediction_dict or 'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict does not contain expected", "width) for each feature map in feature_maps \"\"\" feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map)", "does not contain expected entries.') preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings =", "map in feature_maps \"\"\" feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps", "4] containing the decoded boxes. 
decoded_keypoints: A float32 tensor of shape [batch_size, num_anchors,", "= postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list): num_detections =", "shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1) padded_height = float(resized_inputs_shape[1]) padded_width", "raw_shape = 100 else: raw_shape = scores.shape[0] scores_1 = scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2", "+ BASE_ClassPredictor + \":\", value.shape) classes_predictions_with_background_np.append(value) break for key, value in result_middle.items(): if", "tensor of shape [batch, 3] where each row is of the form [height,", "if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key: print(str(i) + BASE_ClassPredictor", "in zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape = image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape =", "from PIL import Image import matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from scipy", "label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index =", "return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD", "feature map in feature_maps \"\"\" feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for 
feature_map in", "\"\"\" feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps ] return [(shape[1],", "+ BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key: print(str(i) + BASE_ClassPredictor + \":\",", "classes_predictions_with_background_np.append(value) break if i == 0: if PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0 + \":\",", "cliped_imaged def _batch_decode(anchors, box_encodings): \"\"\"Decodes a batch of box encodings with respect to", "in key and BASE_PPN_ClassPredictor in key: print(str(i) + BASE_ClassPredictor + \":\", value.shape) classes_predictions_with_background_np.append(value)", "images in the resized images, as resized images can be padded with zeros.", "of shape [batch, 3] where each row is of the form [height, width,", "prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims(", "not in prediction_dict or 'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict does not contain", "feature map in a list. 
Args: feature_maps: a list of tensors where the", "nmsed_classes, _, nmsed_additional_fields, num_detections) = non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields) detection_dict", "decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1],", "def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories", "post_processor(boxes_encodings, classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps) anchors_list", "height, width, channels] image tensor. true_image_shapes: int32 tensor of shape [batch, 3] where", "confidences. 
Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i,", "4)) boxes = np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections] = num_detections if keypoints", "raise ValueError('prediction_dict does not contain expected entries.') preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings']", "detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections) = non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images,", "numpy as np from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \\ anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder,", "nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) } if (nmsed_additional_fields is not None and fields.BoxListFields.keypoints in nmsed_additional_fields):", "the input `box_encodings`, None otherwise. \"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0]", "PIL import Image import matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from scipy import", "as resized images can be padded with zeros. 
\"\"\" anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list", ":param true_image_shapes: :return: \"\"\" prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors =", "categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes']", "np.tile( np.expand_dims(anchors.get(), 0), [batch_size, 1, 1]) tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder", "feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps) anchors_list = anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list)", "of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of", "1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset", "location of the objects, where q is 1 or the number of classes.", "print(str(i) + BASE_ClassPredictor + \":\", value.shape) classes_predictions_with_background_np.append(value) break for key, value in result_middle.items():", "np.hstack((classes_1, classes_2)) classes = np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes] = classes boxes_1 = boxes[:,", "None: outputs[detection_fields.detection_keypoints] = keypoints if masks is not None: outputs[detection_fields.detection_masks] = masks return", "of the network to yield unpostprocessesed predictions. 
A side effect of calling the", "code_size] representing the location of the objects, where q is 1 or the", "float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2]) cliped_image = np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights / padded_height, true_widths", "post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES", "= sorted(key_dict.items(), key=lambda x: x[1], reverse=True) for key, value in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape", "key_dict = {} for key, value in result_middle.items(): if \"FeatureExtractor\" in key and", "classes_2 = np.ones(shape=raw_shape - num_detections) classes = np.hstack((classes_1, classes_2)) classes = np.reshape(classes, (1,", "'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps, 'anchors': anchors.get() } return predictions_dict,", "instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE = (12, 8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def", "containing the decoded boxes. decoded_keypoints: A float32 tensor of shape [batch_size, num_anchors, num_keypoints,", "+ 1] representing the class predictions for the proposals. Each entry in the", "if keypoints is not None: outputs[detection_fields.detection_keypoints] = keypoints if masks is not None:", "the objects, where q is 1 or the number of classes. Each entry", "of shape [batch_size, num_anchors, 4] containing the decoded boxes. 
decoded_keypoints: A float32 tensor", "additional_fields = { fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections) = non_max_suppression_fn(", "box_encodings, class_predictions_with_background) in zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape = image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE", "to a feature map in the input `image_features` list. class_predictions_with_background: A list of", "batch of images. num_predictions_per_location_list: A list of integers representing the number of box", "box_encodings) detection_boxes = detection_boxes detection_boxes = np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background", "boxes_encodings_np = [] classes_predictions_with_background_np = [] feature_maps_np = [] for i in range(6):", "im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None,", "unpostprocessed tensors from input tensor. This function takes an input batch of images", "+ BASE_BoxEncodingPredictor + \": \", value.shape) boxes_encodings_np.append(value) break if i == 0: if", "image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\"", "be called. 
Args: boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs: a [batch, height, width, channels] image", "np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections] = num_detections if keypoints is not None:", "box_encodings: A float32 tensor of shape [batch_size, num_anchors, box_code_size] containing box encodings. Returns:", "value.shape) boxes_encodings_np.append(value) break for key, value in result_middle.items(): if str(i) + BASE_ClassPredictor in", "the form [height, width, channels] indicating the shapes of true images in the", "if \"FeatureExtractor\" in key and \"fpn\" not in key: print(\"key {} value {}\".format(key,", "None and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict def _compute_clip_window(preprocessed_images,", "tensors from input tensor. 
This function takes an input batch of images and", "< 100: raw_shape = 100 else: raw_shape = scores.shape[0] scores_1 = scores[0:num_detections] print(\"scores_1:\",", "- num_detections) classes = np.hstack((classes_1, classes_2)) classes = np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes] =", "shape_utils, \\ anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \\ visualization_utils as vis_util from platformx.plat_tensorflow.tools.processor.np_utils", "box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing", "in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape =", "= masks return outputs def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts", "= last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def", "anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list) box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim", "= np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes = np.reshape(decoded_boxes.get(), np.stack( [combined_shape[0], combined_shape[1],", "in the input `image_features` list. 
class_predictions_with_background: A list of float tensors of shape", "= classes.flatten() classes_1 = classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2 = np.ones(shape=raw_shape - num_detections) classes", "a feature map in the input `image_features` list. class_predictions_with_background: A list of float", "postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list): num_detections = num_detections[0] elif isinstance(num_detections, float): num_detections = int(num_detections)", "run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np = [] classes_predictions_with_background_np = [] feature_maps_np = [] for i", "of true images in the resized images, as resized images can be padded", "[batch, height, width, channels] image tensor. true_image_shapes: int32 tensor of shape [batch, 3]", "of shape [batch_size, num_anchors, num_keypoints, 2] containing the decoded keypoints if present in", "batch of box encodings with respect to the anchors. Args: box_encodings: A float32", "- num_detections, 4)) boxes = np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections] = num_detections", "if str(i) + BASE_BoxEncodingPredictor in key: print(str(i) + BASE_BoxEncodingPredictor + \": \", value.shape)", "boxes. 
decoded_keypoints: A float32 tensor of shape [batch_size, num_anchors, num_keypoints, 2] containing the", "= result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir) IMG_PATH =", "np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes = np.reshape(decoded_boxes.get(), np.stack( [combined_shape[0], combined_shape[1], 4]))", "classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps) anchors_list =", "detection_scores, clip_window=_compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes:", "boxes_encodings: :param classes_predictions_with_background: :param feature_maps: :param preprocessed_inputs: :param true_image_shapes: :return: \"\"\" prediction_dict, anchors", "and \"fpn\" not in key: print(\"key {} value {}\".format(key, value.shape)) feature_maps_np.append(value) if len(feature_maps_np)", "(im_height, im_width, 3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD model POST", "pass of the network to yield unpostprocessesed predictions. A side effect of calling", "platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from scipy import misc import os BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND", "float32 tensor of shape [batch_size, num_anchors, 4] containing the decoded boxes. decoded_keypoints: A", "padded with zeros. 
\"\"\" anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list)", "class_predictions_with_background) in zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape = image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape", "anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background,", "\"FeatureExtractor\" in key and \"fpn\"in key: key_dict[key] = value.shape[1] sorted_key_dict = sorted(key_dict.items(), key=lambda", "outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections] = num_detections if keypoints is not None: outputs[detection_fields.detection_keypoints] =", "list. \"\"\" box_encodings_list = [] class_predictions_list = [] for (image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background)", "return post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES =", "None otherwise. 
\"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes = np.tile(", "feature_maps: a list of tensors where the ith tensor has shape [batch, height_i,", "= cliped_image.reshape(1, -1) return cliped_imaged def _batch_decode(anchors, box_encodings): \"\"\"Decodes a batch of box", "= np.hstack((classes_1, classes_2)) classes = np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes] = classes boxes_1 =", "with a box_list.BoxList of anchors. These anchors must be constructed before the postprocess", "predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of spatial dimensions for each feature map", "num_detections = int(num_detections[0]) print(\"=============== num_detections :\", num_detections) outputs = {} print(\"scores:\", scores) scores", "result_middle=None): boxes_encodings_np = [] classes_predictions_with_background_np = [] feature_maps_np = [] for i in", "shape[2]) for shape in feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------ post_processor ------------------\")", "classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2 = np.ones(shape=raw_shape - num_detections) classes = np.hstack((classes_1, classes_2)) classes", "os.listdir(img_dir) IMG_PATH = os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image = Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image)", "postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)", "feature_maps_np.append(value) if len(feature_maps_np) < 1: key_dict = {} for key, value in 
result_middle.items():", "of box encodings with respect to the anchors. Args: box_encodings: A float32 tensor", "1, 1]) tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes =", "= _batch_decode(anchors, box_encodings) detection_boxes = detection_boxes detection_boxes = np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn =", "break else: if str(i) + BASE_PPN_BoxPredictor in key: print(str(i) + BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value)", "= detection_scores_with_background[0:, 0:, 1:] additional_fields = None if detection_keypoints is not None: additional_fields", "A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the", "list of pairs (height, width) for each feature map in feature_maps \"\"\" feature_map_shapes", "the number of classes. Each entry in the list corresponds to a feature", "preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1, 3)) post_result =", "num_class_slots = num_classes + 1 class_predictions_with_background = np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2]", "classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD model POST processer :param boxes_encodings: :param classes_predictions_with_background:", "boxes_1 = boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4))", "combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list, 
CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def", "scores.shape[0] < 100: raw_shape = 100 else: raw_shape = scores.shape[0] scores_1 = scores[0:num_detections]", "BASE_BoxEncodingPredictor + \": \", value.shape) boxes_encodings_np.append(value) break if i == 0: if PPN_BoxPredictor_0", "\"\"\" SSD model POST processer :param boxes_encodings: :param classes_predictions_with_background: :param feature_maps: :param preprocessed_inputs:", "list corresponds to a feature map in the input `image_features` list. \"\"\" box_encodings_list", "true_image_shapes): print(\"------------------ postprocess ------------------\") if ('box_encodings' not in prediction_dict or 'class_predictions_with_background' not in", "return detection_dict def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _ =", "ith tensor has shape [batch, height_i, width_i, depth_i]. Returns: a list of pairs", "PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS", "num_predictions_per_location_list): print(\"------------------ post_processor ------------------\") \"\"\"Computes encoded object locations and corresponding confidences. 
Args: image_features:", "boxes_encodings_np.append(value) break for key, value in result_middle.items(): if str(i) + BASE_ClassPredictor in key", "= \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np = [] classes_predictions_with_background_np =", "of box predictions to be made per spatial location for each feature map.", "box_encodings.ndim == 4 and box_encodings.shape[2] == 1: box_encodings = np.squeeze(box_encodings, axis=2) class_predictions_with_background =", "fields.DetectionResultFields.num_detections: float(num_detections) } if (nmsed_additional_fields is not None and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints]", "range(6): for key, value in result_middle.items(): if str(i) + BASE_BoxEncodingPredictor in key: print(str(i)", "new_shape) box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes + 1 class_predictions_with_background = np.reshape(", "the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background:", "class_predictions_list = [] for (image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background) in zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background):", "containing features for a batch of images. 
num_predictions_per_location_list: A list of integers representing", "decoded_keypoints = np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes = np.reshape(decoded_boxes.get(), np.stack( [combined_shape[0],", "str(i) + BASE_BoxEncodingPredictor in key: print(str(i) + BASE_BoxEncodingPredictor + \": \", value.shape) boxes_encodings_np.append(value)", "box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints =", "= ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights,", "matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from scipy import misc import os BOX_ENCODINGS =", "== 0: if PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break else: if str(i)", "= np.squeeze(box_encodings, axis=2) class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs,", "if detection_keypoints is not None: additional_fields = { fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes,", "else: if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key: print(str(i) +", "in feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------ post_processor ------------------\") \"\"\"Computes encoded object", "true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( 
preprocessed_images) true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1) padded_height", "= np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim == 4 and box_encodings.shape[2] == 1: box_encodings =", "box_list_ops.concatenate(anchors_list) box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim == 4 and box_encodings.shape[2] == 1:", "clip_window=_compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes,", "not None and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict def", "the ith tensor has shape [batch, height_i, width_i, depth_i]. Returns: a list of", "has shape [batch, height_i, width_i, depth_i]. 
Returns: a list of pairs (height, width)", "= np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes + 1 class_predictions_with_background", "anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape(", "= [] classes_predictions_with_background_np = [] feature_maps_np = [] for i in range(6): for", "{} print(\"scores:\", scores) scores = scores.flatten() # todo 读取配置文件 置 0 置 1", "from scipy import misc import os BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor", "shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location_list:", "unpostprocessesed predictions. 
A side effect of calling the predict method is that self._anchors", "sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes =", "= boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4)) boxes", "'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\"", "decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints", "int32 tensor of shape [batch, 3] where each row is of the form", "PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np = [] classes_predictions_with_background_np = [] feature_maps_np", "box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes + 1 class_predictions_with_background = np.reshape( class_predictions_with_background,", "representing the number of box predictions to be made per spatial location for", "batch of images and runs it through the forward pass of the network", "def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD model POST processer :param boxes_encodings:", "anchors. These anchors must be constructed before the postprocess or loss functions can", "each feature map in a list. 
Args: feature_maps: a list of tensors where", "else: if str(i) + BASE_PPN_BoxPredictor in key: print(str(i) + BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break", "detection_boxes = np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores =", "model POST processer :param boxes_encodings: :param classes_predictions_with_background: :param feature_maps: :param preprocessed_inputs: :param true_image_shapes:", "list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A", "num_predictions_per_location, box_encodings, class_predictions_with_background) in zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape = image_feature.shape box_code_size =", "box encodings with respect to the anchors. Args: box_encodings: A float32 tensor of", "value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\" not in key: print(\"key", "= np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections] = num_detections if keypoints is not", "BASE_PPN_BoxPredictor in key: print(str(i) + BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break for key, value in", "\"\"\"Return list of spatial dimensions for each feature map in a list. 
Args:", "PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break else: if str(i) + BASE_PPN_BoxPredictor in", "\\ visualization_utils as vis_util from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields from platformx.plat_tensorflow.tools.processor import", "classes_1) classes_2 = np.ones(shape=raw_shape - num_detections) classes = np.hstack((classes_1, classes_2)) classes = np.reshape(classes,", "for i in range(6): for key, value in result_middle.items(): if str(i) + BASE_BoxEncodingPredictor", "\"\"\"Predicts unpostprocessed tensors from input tensor. This function takes an input batch of", "batch_size = combined_shape[0] tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(), 0), [batch_size, 1, 1]) tiled_anchors_boxlist =", "PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0 + \":\", value.shape) classes_predictions_with_background_np.append(value) break else: if str(i) +", "feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors", "tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]. Returns: a", "corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i,", "raw_shape = scores.shape[0] scores_1 = scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2 = np.zeros(shape=raw_shape - num_detections)", "network to yield unpostprocessesed predictions. 
A side effect of calling the predict method", "and BASE_PPN_ClassPredictor not in key: print(str(i) + BASE_ClassPredictor+ \": \", value.shape) classes_predictions_with_background_np.append(value) break", "of pairs (height, width) for each feature map in feature_maps \"\"\" feature_map_shapes =", "= Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True,", "value.shape[1] sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True) for key, value in sorted_key_dict:", "elif isinstance(num_detections, np.ndarray): num_detections = int(num_detections[0]) print(\"=============== num_detections :\", num_detections) outputs = {}", "preprocessed_inputs) postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\")", "outputs[detection_fields.num_detections] = num_detections if keypoints is not None: outputs[detection_fields.detection_keypoints] = keypoints if masks", "np.squeeze(box_encodings, axis=2) class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'box_encodings':", "for key, value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\"in key: key_dict[key]", "feature_maps ] return [(shape[1], shape[2]) for shape in feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background, image_features,", "Returns: a list of pairs (height, width) for each feature map in feature_maps", "is that self._anchors is populated with a box_list.BoxList of 
anchors. These anchors must", "4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode( np.reshape(box_encodings, [-1, box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints =", "feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD model POST processer :param boxes_encodings: :param classes_predictions_with_background: :param", "is not None: outputs[detection_fields.detection_keypoints] = keypoints if masks is not None: outputs[detection_fields.detection_masks] =", "`box_encodings`, None otherwise. \"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes =", "= int(num_detections[0]) print(\"=============== num_detections :\", num_detections) outputs = {} print(\"scores:\", scores) scores =", "fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape", "np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD model", "classes_2)) classes = np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes] = classes boxes_1 = boxes[:, 0:num_detections]", "classes = np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes] = classes boxes_1 = boxes[:, 0:num_detections] print(\"boxes_1:\",", "return outputs def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts unpostprocessed tensors", "1 or the number of classes. 
Each entry in the list corresponds to", "class_predictions_with_background = np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return", "= {} for key, value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\"in", "spatial dimensions for each feature map in a list. Args: feature_maps: a list", "width, channels] image tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each", "print(\"key {} value {}\".format(key, value.shape)) feature_maps_np.append(value) if len(feature_maps_np) < 1: key_dict = {}", "def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts unpostprocessed tensors from input", "containing box encodings. Returns: decoded_boxes: A float32 tensor of shape [batch_size, num_anchors, 4]", "print(\"scores:\", scores) scores = scores.flatten() # todo 读取配置文件 置 0 置 1 操作原始代码", "for (image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background) in zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape = image_feature.shape", "prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings) detection_boxes = detection_boxes detection_boxes = np.expand_dims(detection_boxes, axis=2)", "boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4)) boxes =", "keypoints if present in the input `box_encodings`, None otherwise. 
\"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape(", "= score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:, 0:, 1:] additional_fields = None if detection_keypoints is", "num_detections) outputs = {} print(\"scores:\", scores) scores = scores.flatten() # todo 读取配置文件 置", "anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background, feature_maps, num_predictions_per_location_list)", "box_list_ops, box_list, box_coder_builder, post_processing_builder, \\ visualization_utils as vis_util from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as", "in range(6): for key, value in result_middle.items(): if str(i) + BASE_BoxEncodingPredictor in key:", "postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks)", "float(resized_inputs_shape[2]) cliped_image = np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights / padded_height, true_widths / padded_width], axis=1)", "float32 tensor of shape [batch_size, num_anchors, num_keypoints, 2] containing the decoded keypoints if", "feature map in the input `image_features` list. 
\"\"\" box_encodings_list = [] class_predictions_list =", "float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch", "_add_output_tensor_nodes ------------------\") detection_fields = fields.DetectionResultFields label_id_offset = 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores =", "'feature_maps': feature_maps, 'anchors': anchors.get() } return predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of", "masks is not None: outputs[detection_fields.detection_masks] = masks return outputs def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps,", "1: box_encodings = np.squeeze(box_encodings, axis=2) class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict = {", "the input `image_features` list. \"\"\" box_encodings_list = [] class_predictions_list = [] for (image_feature,", "== 0: if PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0 + \":\", value.shape) classes_predictions_with_background_np.append(value) break else:", "images can be padded with zeros. 
\"\"\" anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location()", "np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes + 1 class_predictions_with_background =", "in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes", "the location of the objects, where q is 1 or the number of", "classes boxes_1 = boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2 = np.zeros(shape=(1, raw_shape - num_detections,", "[batch_size, num_anchors, box_code_size] containing box encodings. Returns: decoded_boxes: A float32 tensor of shape", "A float32 tensor of shape [batch_size, num_anchors, box_code_size] containing box encodings. Returns: decoded_boxes:", "outputs def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts unpostprocessed tensors from", "key: print(PPN_ClassPredictor_0 + \":\", value.shape) classes_predictions_with_background_np.append(value) break else: if str(i) + BASE_ClassPredictor in", "np.zeros(shape=raw_shape - num_detections) scores = np.hstack((scores_1, scores_2)) scores = np.reshape(scores, (1, scores.shape[0])) outputs[detection_fields.detection_scores]", "scipy import misc import os BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor =", "= postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list): num_detections = num_detections[0] elif isinstance(num_detections, float): num_detections =", "< 1: key_dict = {} for key, value in result_middle.items(): if 
\"FeatureExtractor\" in", "get_feature_map_spatial_dims( feature_maps) anchors_list = anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list) box_encodings =", "\\ anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \\ visualization_utils as vis_util from platformx.plat_tensorflow.tools.processor.np_utils import", "feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------ post_processor ------------------\") \"\"\"Computes encoded object locations", "post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\",", "num_detections if keypoints is not None: outputs[detection_fields.detection_keypoints] = keypoints if masks is not", "through the forward pass of the network to yield unpostprocessesed predictions. A side", "encoded object locations and corresponding confidences. Args: image_features: A list of float tensors", "tensor of shape [batch_size, num_anchors, box_code_size] containing box encodings. 
Returns: decoded_boxes: A float32", "not None: outputs[detection_fields.detection_keypoints] = keypoints if masks is not None: outputs[detection_fields.detection_masks] = masks", "box_encodings = np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes + 1", "a list of pairs (height, width) for each feature map in feature_maps \"\"\"", "key=lambda x: x[1], reverse=True) for key, value in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape", "decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape( decoded_keypoints, np.stack([combined_shape[0],", "class_predictions_with_background, 'feature_maps': feature_maps, 'anchors': anchors.get() } return predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list", "detection_boxes = detection_boxes detection_boxes = np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background =", "input `box_encodings`, None otherwise. 
\"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes", "combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(), 0), [batch_size,", "num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors, prediction_dict, true_image_shapes): print(\"------------------", "= np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background,", "= 100 else: raw_shape = scores.shape[0] scores_1 = scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2 =", "CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors, prediction_dict, true_image_shapes): print(\"------------------ postprocess ------------------\") if ('box_encodings' not in", "if box_encodings.ndim == 4 and box_encodings.shape[2] == 1: box_encodings = np.squeeze(box_encodings, axis=2) class_predictions_with_background", "IMG_PATH) image = Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index,", "if scores.shape[0] < 100: raw_shape = 100 else: raw_shape = scores.shape[0] scores_1 =", "shape [batch_size, num_anchors, num_keypoints, 2] containing the decoded keypoints if present in the", "true_heights / padded_height, true_widths / padded_width], axis=1) cliped_imaged = cliped_image.reshape(1, -1) return cliped_imaged", "box_coder = 
box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode( np.reshape(box_encodings, [-1, box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None", "key: print(str(i) + BASE_ClassPredictor + \":\", value.shape) classes_predictions_with_background_np.append(value) break for key, value in", "num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors, prediction_dict, true_image_shapes): print(\"------------------ postprocess", "box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode( np.reshape(box_encodings, [-1, box_coder.code_size]),", "is of the form [height, width, channels] indicating the shapes of true images", "np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list,", "print(\"scores_1:\", scores_1) scores_2 = np.zeros(shape=raw_shape - num_detections) scores = np.hstack((scores_1, scores_2)) scores =", "outputs[detection_fields.detection_classes] = classes boxes_1 = boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2 = np.zeros(shape=(1, raw_shape", "called. 
Args: boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs: a [batch, height, width, channels] image tensor.", "boxes_encodings, classes_predictions_with_background): combined_feature_map_shape = image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] *", "preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts unpostprocessed tensors from input tensor. This function takes", "= np.reshape(scores, (1, scores.shape[0])) outputs[detection_fields.detection_scores] = scores classes = classes.flatten() classes_1 = classes[0:num_detections]", "in prediction_dict): raise ValueError('prediction_dict does not contain expected entries.') preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings", "= label_map_util.create_category_index(categories) result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0] img_dir", "num_anchors, box_code_size] containing box encodings. Returns: decoded_boxes: A float32 tensor of shape [batch_size,", "BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key: print(str(i) + BASE_ClassPredictor + \":\", value.shape)", "100: raw_shape = 100 else: raw_shape = scores.shape[0] scores_1 = scores[0:num_detections] print(\"scores_1:\", scores_1)", "print(str(i) + BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break for key, value in result_middle.items(): if str(i)", "method is that self._anchors is populated with a box_list.BoxList of anchors. 
These anchors", "100 else: raw_shape = scores.shape[0] scores_1 = scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2 = np.zeros(shape=raw_shape", "np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes = np.reshape(decoded_boxes.get(), np.stack( [combined_shape[0], combined_shape[1], 4])) return decoded_boxes,", "list of integers representing the number of box predictions to be made per", "integers representing the number of box predictions to be made per spatial location", "is not None: additional_fields = { fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,", "prediction_dict or 'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict does not contain expected entries.')", "0), [batch_size, 1, 1]) tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\")", "boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints", "\"\"\" anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings,", "true_widths / padded_width], axis=1) cliped_imaged = cliped_image.reshape(1, -1) return cliped_imaged def _batch_decode(anchors, box_encodings):", "np.reshape(box_encodings, [-1, box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints)", "from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as 
fields from platformx.plat_tensorflow.tools.processor import model_config import config from", "forward pass of the network to yield unpostprocessesed predictions. A side effect of", "fields.DetectionResultFields label_id_offset = 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get(", "np.ndarray): num_detections = int(num_detections[0]) print(\"=============== num_detections :\", num_detections) outputs = {} print(\"scores:\", scores)", "cliped_imaged = cliped_image.reshape(1, -1) return cliped_imaged def _batch_decode(anchors, box_encodings): \"\"\"Decodes a batch of", "None: outputs[detection_fields.detection_masks] = masks return outputs def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part", "effect of calling the predict method is that self._anchors is populated with a", "self._anchors is populated with a box_list.BoxList of anchors. These anchors must be constructed", "in a list. 
Args: feature_maps: a list of tensors where the ith tensor", "{}\".format(key, value.shape)) feature_maps_np.append(value) if len(feature_maps_np) < 1: key_dict = {} for key, value", "tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects,", "box_code_size]) box_encodings = np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes +", "elif isinstance(num_detections, float): num_detections = int(num_detections) elif isinstance(num_detections, np.ndarray): num_detections = int(num_detections[0]) print(\"===============", "vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE = (12,", "= \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS =", "todo 读取配置文件 置 0 置 1 操作原始代码 if scores.shape[0] < 100: raw_shape =", "0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4)) boxes = np.hstack((boxes_1,", "num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape = image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1]", "break for key, value in result_middle.items(): if str(i) + BASE_ClassPredictor in key and", "axis=1) padded_height = float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2]) cliped_image = np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), 
true_heights", "vis_util from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields from platformx.plat_tensorflow.tools.processor import model_config import config", "num_detections) classes = np.hstack((classes_1, classes_2)) classes = np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes] = classes", "locations and corresponding confidences. Args: image_features: A list of float tensors of shape", "in feature_maps ] return [(shape[1], shape[2]) for shape in feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background,", "load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE =", "prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields = fields.DetectionResultFields label_id_offset", "true_image_shapes) show_detection_result(post_result) return post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES", "before the postprocess or loss functions can be called. Args: boxes_encodings: classes_predictions_with_background: feature_maps:", "box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim == 4 and box_encodings.shape[2] == 1: box_encodings", "+ BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in key: print(str(i) + BASE_ClassPredictor+ \":", "def _batch_decode(anchors, box_encodings): \"\"\"Decodes a batch of box encodings with respect to the", "each feature map. 
Returns: box_encodings: A list of float tensors of shape [batch_size,", "置 0 置 1 操作原始代码 if scores.shape[0] < 100: raw_shape = 100 else:", "= os.listdir(img_dir) IMG_PATH = os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image = Image.open(IMG_PATH) image_np =", "of anchors. These anchors must be constructed before the postprocess or loss functions", "\"\"\"Decodes a batch of box encodings with respect to the anchors. Args: box_encodings:", "\": \", value.shape) boxes_encodings_np.append(value) break if i == 0: if PPN_BoxPredictor_0 in key:", "value in result_middle.items(): if str(i) + BASE_BoxEncodingPredictor in key: print(str(i) + BASE_BoxEncodingPredictor +", "= scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2 = np.zeros(shape=raw_shape - num_detections) scores = np.hstack((scores_1, scores_2))", "print(PPN_ClassPredictor_0 + \":\", value.shape) classes_predictions_with_background_np.append(value) break else: if str(i) + BASE_ClassPredictor in key", "detection_scores = detection_scores_with_background[0:, 0:, 1:] additional_fields = None if detection_keypoints is not None:", "preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps) anchors_list = anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors =", "= \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs,", "of spatial dimensions for each feature map in a list. Args: feature_maps: a", "to be made per spatial location for each feature map. 
Returns: box_encodings: A", "box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, 1, box_code_size])", "if isinstance(num_detections, list): num_detections = num_detections[0] elif isinstance(num_detections, float): num_detections = int(num_detections) elif", "3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD model POST processer :param", "\": \", value.shape) classes_predictions_with_background_np.append(value) break if i == 0: if PPN_ClassPredictor_0 in key:", "BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor", "_ = np.split(true_image_shapes, 3, axis=1) padded_height = float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2]) cliped_image =", "key: key_dict[key] = value.shape[1] sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True) for key,", "true_image_shapes=None): \"\"\" SSD model POST processer :param boxes_encodings: :param classes_predictions_with_background: :param feature_maps: :param", "np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:, 0:,", "\":\", value.shape) classes_predictions_with_background_np.append(value) break else: if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor", "def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np = [] classes_predictions_with_background_np = [] 
feature_maps_np = [] for", "keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list): num_detections", "box_encodings = box_encodings class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings) detection_boxes =", "row is of the form [height, width, channels] indicating the shapes of true", "print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break else: if str(i) + BASE_PPN_BoxPredictor in key: print(str(i) +", "otherwise. \"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(),", "the number of box predictions to be made per spatial location for each", "platformx.plat_tensorflow.tools.processor import model_config import config from PIL import Image import matplotlib matplotlib.use('Agg') from", "import Image import matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from scipy import misc", "Args: feature_maps: a list of tensors where the ith tensor has shape [batch,", "the anchors. 
Args: box_encodings: A float32 tensor of shape [batch_size, num_anchors, box_code_size] containing", "box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps, 'anchors': anchors.get() } return predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps):", "each feature map in feature_maps \"\"\" feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map", "key: print(\"key {} value {}\".format(key, value.shape)) feature_maps_np.append(value) if len(feature_maps_np) < 1: key_dict =", "classes.shape[0])) outputs[detection_fields.detection_classes] = classes boxes_1 = boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2 = np.zeros(shape=(1,", "= postprocess(anchors, prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields =", "_batch_decode(anchors, box_encodings) detection_boxes = detection_boxes detection_boxes = np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD)", "= { fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections) = non_max_suppression_fn( detection_boxes,", "of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images.", "config from PIL import Image import matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from", "functions can be called. 
Args: boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs: a [batch, height, width,", "dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1, 3)) post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result)", "a feature map in the input `image_features` list. \"\"\" box_encodings_list = [] class_predictions_list", "= result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST file_list", "feature_maps \"\"\" feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps ] return", "feature_maps, num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps) anchors_list = anchor_generator.generate(", "= fields.DetectionResultFields label_id_offset = 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) classes =", "None: additional_fields = { fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections) =", "scores = scores.flatten() # todo 读取配置文件 置 0 置 1 操作原始代码 if scores.shape[0]", "* num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors, prediction_dict, true_image_shapes):", "return {BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors, prediction_dict, true_image_shapes): 
print(\"------------------ postprocess ------------------\") if", "= postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections =", "- num_detections) scores = np.hstack((scores_1, scores_2)) scores = np.reshape(scores, (1, scores.shape[0])) outputs[detection_fields.detection_scores] =", "side effect of calling the predict method is that self._anchors is populated with", "np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode( np.reshape(box_encodings, [-1, box_coder.code_size]), tiled_anchors_boxlist)", "[(shape[1], shape[2]) for shape in feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------ post_processor", "float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions", "classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------", "------------------\") \"\"\"Predicts unpostprocessed tensors from input tensor. 
This function takes an input batch", "读取配置文件 置 0 置 1 操作原始代码 if scores.shape[0] < 100: raw_shape = 100", "shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q", "= config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np = [] classes_predictions_with_background_np = [] feature_maps_np =", "= [] for i in range(6): for key, value in result_middle.items(): if str(i)", "num_detections = int(num_detections) elif isinstance(num_detections, np.ndarray): num_detections = int(num_detections[0]) print(\"=============== num_detections :\", num_detections)", "str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key: print(str(i) + BASE_ClassPredictor +", "combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors, prediction_dict,", "print(str(i) + BASE_ClassPredictor+ \": \", value.shape) classes_predictions_with_background_np.append(value) break if i == 0: if", "scores.shape[0] scores_1 = scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2 = np.zeros(shape=raw_shape - num_detections) scores =", "= scores classes = classes.flatten() classes_1 = classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2 = np.ones(shape=raw_shape", "fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) } if (nmsed_additional_fields is not None and", "class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors, prediction_dict, true_image_shapes): print(\"------------------ 
postprocess ------------------\")", "[batch, 3] where each row is of the form [height, width, channels] indicating", "for shape in feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------ post_processor ------------------\") \"\"\"Computes", "predictions for the proposals. Each entry in the list corresponds to a feature", "num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes + 1 class_predictions_with_background = np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0],", "the decoded keypoints if present in the input `box_encodings`, None otherwise. \"\"\" combined_shape", "print(\"------------------ post_processor ------------------\") \"\"\"Computes encoded object locations and corresponding confidences. Args: image_features: A", "= np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:,", "(height, width) for each feature map in feature_maps \"\"\" feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape(", "num_detections :\", num_detections) outputs = {} print(\"scores:\", scores) scores = scores.flatten() # todo", "scores classes = classes.flatten() classes_1 = classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2 = np.ones(shape=raw_shape -", "padded_height, true_widths / padded_width], axis=1) cliped_imaged = cliped_image.reshape(1, -1) return cliped_imaged def _batch_decode(anchors,", "map in a list. 
Args: feature_maps: a list of tensors where the ith", "int(num_detections) elif isinstance(num_detections, np.ndarray): num_detections = int(num_detections[0]) print(\"=============== num_detections :\", num_detections) outputs =", "in the list corresponds to a feature map in the input `image_features` list.", "print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims", "image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, 1,", "fields from platformx.plat_tensorflow.tools.processor import model_config import config from PIL import Image import matplotlib", "in feature_maps \"\"\" feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps ]", "= [ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps ] return [(shape[1], shape[2]) for", "Each entry in the list corresponds to a feature map in the input", "= true_image_shapes.reshape((1, 3)) post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return post_result", "sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True) for key, value in sorted_key_dict: feature_maps_np.append(result_middle[key])", "import misc import os BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\"", "for key, value in result_middle.items(): if str(i) + BASE_BoxEncodingPredictor 
in key: print(str(i) +", "tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(), 0), [batch_size, 1, 1]) tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1,", "PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def", "[-1, 4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode( np.reshape(box_encodings, [-1, box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints", "img_dir = config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir) IMG_PATH = os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image", "box encodings. Returns: decoded_boxes: A float32 tensor of shape [batch_size, num_anchors, 4] containing", "(1, classes.shape[0])) outputs[detection_fields.detection_classes] = classes boxes_1 = boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2 =", "entry in the list corresponds to a feature map in the input `image_features`", "shape in feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------ post_processor ------------------\") \"\"\"Computes encoded", "os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image = Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'],", "box_code_size] containing box encodings. 
Returns: decoded_boxes: A float32 tensor of shape [batch_size, num_anchors,", "\"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\"", "This function takes an input batch of images and runs it through the", "'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict does not contain expected entries.') preprocessed_images =", "None if detection_keypoints is not None: additional_fields = { fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores,", "detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores,", "return predictions_dict, anchors def get_feature_map_spatial_dims(feature_maps): \"\"\"Return list of spatial dimensions for each feature", "= float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2]) cliped_image = np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights / padded_height,", "\"fpn\"in key: key_dict[key] = value.shape[1] sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True) for", "platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields from platformx.plat_tensorflow.tools.processor import model_config import config from PIL", "+ \": \", value.shape) boxes_encodings_np.append(value) break if i == 0: if PPN_BoxPredictor_0 in", "key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break else: if str(i) + BASE_PPN_BoxPredictor in key: print(str(i)", "shape [batch, height_i, width_i, depth_i]. 
Returns: a list of pairs (height, width) for", "axis=1) if box_encodings.ndim == 4 and box_encodings.shape[2] == 1: box_encodings = np.squeeze(box_encodings, axis=2)", "'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\"", "key and \"fpn\"in key: key_dict[key] = value.shape[1] sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1],", "for feature_map in feature_maps ] return [(shape[1], shape[2]) for shape in feature_map_shapes] def", "with respect to the anchors. Args: box_encodings: A float32 tensor of shape [batch_size,", "combined_shape[0] tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(), 0), [batch_size, 1, 1]) tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes,", "if len(feature_maps_np) < 1: key_dict = {} for key, value in result_middle.items(): if", "NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) result['detection_classes'] =", "= box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode( np.reshape(box_encodings, [-1, box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None if", "SSD model POST processer :param boxes_encodings: :param classes_predictions_with_background: :param feature_maps: :param preprocessed_inputs: :param", "# NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories)", "(im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 
3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps,", "scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2 = np.zeros(shape=raw_shape - num_detections) scores = np.hstack((scores_1, scores_2)) scores", "in key and \"fpn\" not in key: print(\"key {} value {}\".format(key, value.shape)) feature_maps_np.append(value)", "resized images can be padded with zeros. \"\"\" anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list =", "padded_height = float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2]) cliped_image = np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights /", "= post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS)", "depth_i]. Returns: a list of pairs (height, width) for each feature map in", "key, value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\" not in key:", "= classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2 = np.ones(shape=raw_shape - num_detections) classes = np.hstack((classes_1, classes_2))", "= \"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np =", "decoded_boxes: A float32 tensor of shape [batch_size, num_anchors, 4] containing the decoded boxes.", "resized images, as resized images can be padded with zeros. 
\"\"\" anchor_generator =", "nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _", "= np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights / padded_height, true_widths / padded_width], axis=1) cliped_imaged =", "show_detection_result(post_result) return post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES", "feature map. Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i,", "= prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings = box_encodings class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints", "anchors_list = anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list) box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1)", "from platformx.plat_tensorflow.tools.processor import model_config import config from PIL import Image import matplotlib matplotlib.use('Agg')", "decoded keypoints if present in the input `box_encodings`, None otherwise. 
\"\"\" combined_shape =", "= {} print(\"scores:\", scores) scores = scores.flatten() # todo 读取配置文件 置 0 置", "key and \"fpn\" not in key: print(\"key {} value {}\".format(key, value.shape)) feature_maps_np.append(value) if", "post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:, 0:, 1:] additional_fields = None if", "result['detection_classes'], result['detection_scores'], category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE = (12, 8) # plt.figure(figsize=IMAGE_SIZE)", "config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir) IMG_PATH = os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image = Image.open(IMG_PATH)", "map. Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q,", ":param boxes_encodings: :param classes_predictions_with_background: :param feature_maps: :param preprocessed_inputs: :param true_image_shapes: :return: \"\"\" prediction_dict,", "shape [batch_size, num_anchors, box_code_size] containing box encodings. Returns: decoded_boxes: A float32 tensor of", "in key: print(str(i) + BASE_ClassPredictor + \":\", value.shape) classes_predictions_with_background_np.append(value) break for key, value", "prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings = box_encodings class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints =", "a [batch, height, width, channels] image tensor. 
true_image_shapes: int32 tensor of shape [batch,", "outputs = {} print(\"scores:\", scores) scores = scores.flatten() # todo 读取配置文件 置 0", "= combined_shape[0] tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(), 0), [batch_size, 1, 1]) tiled_anchors_boxlist = box_list.BoxList(", ":param feature_maps: :param preprocessed_inputs: :param true_image_shapes: :return: \"\"\" prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background,", "= anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background, feature_maps,", "preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) #", "BASE_ClassPredictor + \":\", value.shape) classes_predictions_with_background_np.append(value) break for key, value in result_middle.items(): if \"FeatureExtractor\"", "# plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height,", "box_coder.decode( np.reshape(box_encodings, [-1, box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field(", "np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, 1, box_code_size]) box_encodings = np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings)", "scores.flatten() # todo 读取配置文件 置 0 置 1 操作原始代码 if scores.shape[0] < 100:", "nmsed_scores, 
fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) } if (nmsed_additional_fields is not None and fields.BoxListFields.keypoints", "[batch, height_i, width_i, depth_i]. Returns: a list of pairs (height, width) for each", "Image import matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from scipy import misc import", "in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\" not in key: print(\"key {}", "detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings) detection_boxes = detection_boxes detection_boxes = np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn,", "post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD model POST processer :param boxes_encodings: :param", "the decoded boxes. decoded_keypoints: A float32 tensor of shape [batch_size, num_anchors, num_keypoints, 2]", "anchors must be constructed before the postprocess or loss functions can be called.", "load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background,", "detection_fields.detection_classes) + label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) if", "category_index = label_map_util.create_category_index(categories) result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0]", "np.hstack((scores_1, scores_2)) scores = np.reshape(scores, (1, scores.shape[0])) 
outputs[detection_fields.detection_scores] = scores classes = classes.flatten()", "detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:, 0:, 1:] additional_fields = None if detection_keypoints", "import os BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor =", "in key: print(str(i) + BASE_ClassPredictor+ \": \", value.shape) classes_predictions_with_background_np.append(value) break if i ==", "Returns: decoded_boxes: A float32 tensor of shape [batch_size, num_anchors, 4] containing the decoded", "[] classes_predictions_with_background_np = [] feature_maps_np = [] for i in range(6): for key,", "combined_shape[1], num_keypoints, 2])) decoded_boxes = np.reshape(decoded_boxes.get(), np.stack( [combined_shape[0], combined_shape[1], 4])) return decoded_boxes, decoded_keypoints", "def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _ = np.split(true_image_shapes, 3,", "== 4 and box_encodings.shape[2] == 1: box_encodings = np.squeeze(box_encodings, axis=2) class_predictions_with_background = np.concatenate(", "the forward pass of the network to yield unpostprocessesed predictions. 
A side effect", "shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals.", "true_image_shapes = true_image_shapes.reshape((1, 3)) post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return", "3, axis=1) padded_height = float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2]) cliped_image = np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths),", "processer :param boxes_encodings: :param classes_predictions_with_background: :param feature_maps: :param preprocessed_inputs: :param true_image_shapes: :return: \"\"\"", "true_image_shapes), additional_fields=additional_fields) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections)", "1:] additional_fields = None if detection_keypoints is not None: additional_fields = { fields.BoxListFields.keypoints:", "decoded_keypoints: A float32 tensor of shape [batch_size, num_anchors, num_keypoints, 2] containing the decoded", "true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1) padded_height = float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2])", "_add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields = fields.DetectionResultFields label_id_offset = 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes)", "classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts unpostprocessed tensors from input tensor. 
This", "num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps) anchors_list = anchor_generator.generate( feature_map_spatial_dims,", "len(feature_maps_np) < 1: key_dict = {} for key, value in result_middle.items(): if \"FeatureExtractor\"", "feature_maps: :param preprocessed_inputs: :param true_image_shapes: :return: \"\"\" prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps,", "= np.ones(shape=raw_shape - num_detections) classes = np.hstack((classes_1, classes_2)) classes = np.reshape(classes, (1, classes.shape[0]))", "max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores']", "detection_boxes detection_boxes = np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores", "print(\"=============== num_detections :\", num_detections) outputs = {} print(\"scores:\", scores) scores = scores.flatten() #", "# IMAGE_SIZE = (12, 8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image): (im_width, im_height)", "visualization_utils as vis_util from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields from platformx.plat_tensorflow.tools.processor import model_config", "it through the forward pass of the network to yield unpostprocessesed predictions. 
A", "zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape = image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0],", "to a feature map in the input `image_features` list. \"\"\" box_encodings_list = []", "= os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image = Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np,", "file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image = Image.open(IMG_PATH) image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'],", "image_np = load_image_into_numpy_array(image) vis_util.visualize_boxes_and_labels_on_image_array( image_np, result['detection_boxes'], result['detection_classes'], result['detection_scores'], category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) #", "features for a batch of images. num_predictions_per_location_list: A list of integers representing the", "and \"fpn\"in key: key_dict[key] = value.shape[1] sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True)", "predictions to be made per spatial location for each feature map. Returns: box_encodings:", "[batch_size, height_i, width_i, channels_i] containing features for a batch of images. 
num_predictions_per_location_list: A", "if str(i) + BASE_PPN_BoxPredictor in key: print(str(i) + BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break for", "nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections) = non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields)", "BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np", "num_detections = num_detections[0] elif isinstance(num_detections, float): num_detections = int(num_detections) elif isinstance(num_detections, np.ndarray): num_detections", "A list of integers representing the number of box predictions to be made", "of images and runs it through the forward pass of the network to", "class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background':", "1]) tiled_anchors_boxlist = box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode(", "classes_predictions_with_background_np.append(value) break for key, value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\"", "result['detection_scores'], category_index, instance_masks=result.get('detection_masks'), use_normalized_coordinates=True, line_thickness=8) # IMAGE_SIZE = (12, 8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png',", "outputs[detection_fields.detection_scores] = scores classes = classes.flatten() classes_1 = classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2 =", "if str(i) + 
BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in key: print(str(i) +", "of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class", "------------------\") if ('box_encodings' not in prediction_dict or 'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict", "_, nmsed_additional_fields, num_detections) = non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields) detection_dict =", "of shape [batch_size, num_anchors, box_code_size] containing box encodings. Returns: decoded_boxes: A float32 tensor", "key, value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\"in key: key_dict[key] =", "result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\"in key: key_dict[key] = value.shape[1] sorted_key_dict =", "classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------ post_processor ------------------\") \"\"\"Computes encoded object locations and corresponding confidences.", "float32 tensor of shape [batch_size, num_anchors, box_code_size] containing box encodings. 
Returns: decoded_boxes: A", "key and BASE_PPN_ClassPredictor in key: print(str(i) + BASE_ClassPredictor + \":\", value.shape) classes_predictions_with_background_np.append(value) break", "detection_dict def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths, _ = np.split(true_image_shapes,", "predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps, 'anchors': anchors.get()", "shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(), 0), [batch_size, 1, 1])", "box_encodings): \"\"\"Decodes a batch of box encodings with respect to the anchors. Args:", "additional_fields=additional_fields) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) }", "[ shape_utils.combined_static_and_dynamic_shape( feature_map) for feature_map in feature_maps ] return [(shape[1], shape[2]) for shape", "keypoints if masks is not None: outputs[detection_fields.detection_masks] = masks return outputs def last_predict_part(boxes_encodings,", "{ fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) } if (nmsed_additional_fields is", "channels] indicating the shapes of true images in the resized images, as resized", "for each feature map in a list. 
Args: feature_maps: a list of tensors", ":return: \"\"\" prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors = postprocess(anchors, prediction_dict,", "post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return post_result def show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map", "in key and \"fpn\"in key: key_dict[key] = value.shape[1] sorted_key_dict = sorted(key_dict.items(), key=lambda x:", "= config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir) IMG_PATH = os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image =", "boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs: a [batch, height, width, channels] image tensor. true_image_shapes: int32", "in key: print(str(i) + BASE_BoxEncodingPredictor + \": \", value.shape) boxes_encodings_np.append(value) break if i", "of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a", "(1, scores.shape[0])) outputs[detection_fields.detection_scores] = scores classes = classes.flatten() classes_1 = classes[0:num_detections] print(\"classes_1:\", classes_1)", "+ BASE_PPN_BoxPredictor in key: print(str(i) + BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break for key, value", "BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0", "is not None: outputs[detection_fields.detection_masks] = masks return outputs def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None):", "im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list) 
box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim == 4 and", "# todo 读取配置文件 置 0 置 1 操作原始代码 if scores.shape[0] < 100: raw_shape", "input tensor. This function takes an input batch of images and runs it", "* combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list} def postprocess(anchors,", "for each feature map in feature_maps \"\"\" feature_map_shapes = [ shape_utils.combined_static_and_dynamic_shape( feature_map) for", "A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing", "width_i, channels_i] containing features for a batch of images. num_predictions_per_location_list: A list of", "box_encodings.shape[2] == 1: box_encodings = np.squeeze(box_encodings, axis=2) class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict", "shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps) anchors_list = anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors", "classes_predictions_with_background): combined_feature_map_shape = image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2]", "postprocess or loss functions can be called. 
Args: boxes_encodings: classes_predictions_with_background: feature_maps: preprocessed_inputs: a", "== 1: box_encodings = np.squeeze(box_encodings, axis=2) class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'], axis=1) predictions_dict =", "= { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) } if (nmsed_additional_fields", "\", value.shape) classes_predictions_with_background_np.append(value) break if i == 0: if PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0", "\"\"\" box_encodings_list = [] class_predictions_list = [] for (image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background) in", "if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape( decoded_keypoints,", "\"\"\" prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes)", ":param classes_predictions_with_background: :param feature_maps: :param preprocessed_inputs: :param true_image_shapes: :return: \"\"\" prediction_dict, anchors =", "masks return outputs def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None): print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts unpostprocessed", "\"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def 
run_ssd_tf_post(preprocessed_inputs, result_middle=None):", "[np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights / padded_height, true_widths / padded_width], axis=1) cliped_imaged = cliped_image.reshape(1, -1)", "true_image_shapes: :return: \"\"\" prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors = postprocess(anchors,", "{ fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections) = non_max_suppression_fn( detection_boxes, detection_scores,", "entries.') preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings = box_encodings class_predictions = prediction_dict['class_predictions_with_background']", "im_width, 3)).astype(np.uint8) def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD model POST processer", "* combined_feature_map_shape[2] * num_predictions_per_location, 1, box_code_size]) box_encodings = np.reshape(box_encodings, new_shape) box_encodings_list.append(box_encodings) num_classes =", "= classes boxes_1 = boxes[:, 0:num_detections] print(\"boxes_1:\", boxes_1) boxes_2 = np.zeros(shape=(1, raw_shape -", "image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps) anchors_list = anchor_generator.generate( feature_map_spatial_dims, im_height=image_shape[1],", "else: raw_shape = scores.shape[0] scores_1 = scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2 = np.zeros(shape=raw_shape -", "and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict 
def _compute_clip_window(preprocessed_images, true_image_shapes):", "input `image_features` list. \"\"\" box_encodings_list = [] class_predictions_list = [] for (image_feature, num_predictions_per_location,", "not in prediction_dict): raise ValueError('prediction_dict does not contain expected entries.') preprocessed_images = prediction_dict['preprocessed_inputs']", "num_detections) = non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields) detection_dict = { fields.DetectionResultFields.detection_boxes:", "and box_encodings.shape[2] == 1: box_encodings = np.squeeze(box_encodings, axis=2) class_predictions_with_background = np.concatenate( prediction_dict['class_predictions_with_background'], axis=1)", "config.cfg.POSTPROCESSOR.NUM_CLASSES num_class_slots = num_classes + 1 class_predictions_with_background = np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] *", "key: print(str(i) + BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break for key, value in result_middle.items(): if", "num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry", "1: key_dict = {} for key, value in result_middle.items(): if \"FeatureExtractor\" in key", "= [] class_predictions_list = [] for (image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background) in zip(image_features, num_predictions_per_location_list,", "i == 0: if PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0 + \":\", value.shape) classes_predictions_with_background_np.append(value) break", "------------------\") \"\"\"Computes encoded object locations and corresponding confidences. 
Args: image_features: A list of", "of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where", "A float32 tensor of shape [batch_size, num_anchors, 4] containing the decoded boxes. decoded_keypoints:", "\"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np = [] classes_predictions_with_background_np = []", "= label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] =", "can be padded with zeros. \"\"\" anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location() #", "axis=1) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps, 'anchors':", "return cliped_imaged def _batch_decode(anchors, box_encodings): \"\"\"Decodes a batch of box encodings with respect", "classes_predictions_with_background: :param feature_maps: :param preprocessed_inputs: :param true_image_shapes: :return: \"\"\" prediction_dict, anchors = last_predict_part(boxes_encodings,", "input_shape[3]], dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1, 3)) post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes)", "width, channels] indicating the shapes of true images in the resized images, as", "dimensions for each feature map in a list. 
Args: feature_maps: a list of", "\"fpn\" not in key: print(\"key {} value {}\".format(key, value.shape)) feature_maps_np.append(value) if len(feature_maps_np) <", "be made per spatial location for each feature map. Returns: box_encodings: A list", "in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\"in key: key_dict[key] = value.shape[1] sorted_key_dict", "made per spatial location for each feature map. Returns: box_encodings: A list of", "classes_predictions_with_background_np.append(value) break else: if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key:", "np.reshape(scores, (1, scores.shape[0])) outputs[detection_fields.detection_scores] = scores classes = classes.flatten() classes_1 = classes[0:num_detections] print(\"classes_1:\",", "= postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks =", "proposals. Each entry in the list corresponds to a feature map in the", "anchors = box_list_ops.concatenate(anchors_list) box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim == 4 and box_encodings.shape[2]", "post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------ post_processor ------------------\") \"\"\"Computes encoded object locations and corresponding", "value {}\".format(key, value.shape)) feature_maps_np.append(value) if len(feature_maps_np) < 1: key_dict = {} for key,", "= 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0 =", "box predictions to be made per spatial location for each feature map. 
Returns:", "in result_middle.items(): if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in key:", "= image_feature.shape box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location,", "prediction_dict['box_encodings'] box_encodings = box_encodings class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings) detection_boxes", "x[1], reverse=True) for key, value in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape true_image_shapes =", "a batch of box encodings with respect to the anchors. Args: box_encodings: A", "-1) return cliped_imaged def _batch_decode(anchors, box_encodings): \"\"\"Decodes a batch of box encodings with", "predict method is that self._anchors is populated with a box_list.BoxList of anchors. These", "in the resized images, as resized images can be padded with zeros. 
\"\"\"", "in key: print(PPN_ClassPredictor_0 + \":\", value.shape) classes_predictions_with_background_np.append(value) break else: if str(i) + BASE_ClassPredictor", "= np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1, 3)) post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np,", "result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST", "= label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index", "= [] for (image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background) in zip(image_features, num_predictions_per_location_list, boxes_encodings, classes_predictions_with_background): combined_feature_map_shape", "print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields = fields.DetectionResultFields label_id_offset = 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores", "and BASE_PPN_ClassPredictor in key: print(str(i) + BASE_ClassPredictor + \":\", value.shape) classes_predictions_with_background_np.append(value) break for", "runs it through the forward pass of the network to yield unpostprocessesed predictions.", "boxes_encodings_np.append(value) break if i == 0: if PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value)", "be padded with zeros. 
\"\"\" anchor_generator = anchor_generator_builder.build() num_predictions_per_location_list = anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\",", "per spatial location for each feature map. Returns: box_encodings: A list of float", "post_processing_builder, \\ visualization_utils as vis_util from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields from platformx.plat_tensorflow.tools.processor", "channels] image tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row", "= non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window( preprocessed_images, true_image_shapes), additional_fields=additional_fields) detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes,", "fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) } if (nmsed_additional_fields is not None and fields.BoxListFields.keypoints in", "misc import os BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor", "value.shape) classes_predictions_with_background_np.append(value) break for key, value in result_middle.items(): if \"FeatureExtractor\" in key and", "decoded boxes. decoded_keypoints: A float32 tensor of shape [batch_size, num_anchors, num_keypoints, 2] containing", "tiled_anchors_boxlist) decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints = decoded_keypoints.get_shape()[1]", "tensor has shape [batch, height_i, width_i, depth_i]. Returns: a list of pairs (height,", "takes an input batch of images and runs it through the forward pass", "be constructed before the postprocess or loss functions can be called. 
Args: boxes_encodings:", "[batch_size, num_anchors, num_keypoints, 2] containing the decoded keypoints if present in the input", "value.shape) classes_predictions_with_background_np.append(value) break else: if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in", "scores_2 = np.zeros(shape=raw_shape - num_detections) scores = np.hstack((scores_1, scores_2)) scores = np.reshape(scores, (1,", "spatial location for each feature map. Returns: box_encodings: A list of float tensors", "num_detections[0] elif isinstance(num_detections, float): num_detections = int(num_detections) elif isinstance(num_detections, np.ndarray): num_detections = int(num_detections[0])", "classes = classes.flatten() classes_1 = classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2 = np.ones(shape=raw_shape - num_detections)", "import shape_utils, \\ anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \\ visualization_utils as vis_util from", "true_image_shapes.reshape((1, 3)) post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np, preprocessed_inputs, true_image_shapes) show_detection_result(post_result) return post_result def", "\"\"\" combined_shape = shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(), 0),", "classes = np.hstack((classes_1, classes_2)) classes = np.reshape(classes, (1, classes.shape[0])) outputs[detection_fields.detection_classes] = classes boxes_1", "BASE_BoxEncodingPredictor in key: print(str(i) + BASE_BoxEncodingPredictor + \": \", value.shape) boxes_encodings_np.append(value) break if", "IMAGE_SIZE = (12, 8) # plt.figure(figsize=IMAGE_SIZE) misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image): (im_width, im_height) =", "isinstance(num_detections, np.ndarray): num_detections = int(num_detections[0]) print(\"=============== 
num_detections :\", num_detections) outputs = {} print(\"scores:\",", "+ BASE_BoxEncodingPredictor in key: print(str(i) + BASE_BoxEncodingPredictor + \": \", value.shape) boxes_encodings_np.append(value) break", ":\", num_detections) outputs = {} print(\"scores:\", scores) scores = scores.flatten() # todo 读取配置文件", "'detection_classes'][0].astype(np.uint8) result['detection_boxes'] = result['detection_boxes'][0] result['detection_scores'] = result['detection_scores'][0] img_dir = config.cfg.PREPROCESS.IMG_LIST file_list = os.listdir(img_dir)", "pairs (height, width) for each feature map in feature_maps \"\"\" feature_map_shapes = [", "float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the", "anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors)", "the shapes of true images in the resized images, as resized images can", "import label_map_util from scipy import misc import os BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND =", "as vis_util from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields from platformx.plat_tensorflow.tools.processor import model_config import", "as np from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \\ anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \\", "new_shape = np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, 1, box_code_size]) box_encodings = np.reshape(box_encodings,", "the list corresponds to a feature map in the input `image_features` list. 
\"\"\"", "+ BASE_ClassPredictor+ \": \", value.shape) classes_predictions_with_background_np.append(value) break if i == 0: if PPN_ClassPredictor_0", "postprocess ------------------\") if ('box_encodings' not in prediction_dict or 'class_predictions_with_background' not in prediction_dict): raise", "representing the class predictions for the proposals. Each entry in the list corresponds", "containing the decoded keypoints if present in the input `box_encodings`, None otherwise. \"\"\"", "\"FeatureExtractor\" in key and \"fpn\" not in key: print(\"key {} value {}\".format(key, value.shape))", "box_encodings_list = [] class_predictions_list = [] for (image_feature, num_predictions_per_location, box_encodings, class_predictions_with_background) in zip(image_features,", "corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list", "true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32) true_image_shapes = true_image_shapes.reshape((1, 3)) post_result = post_deal(boxes_encodings_np,", "list of spatial dimensions for each feature map in a list. 
Args: feature_maps:", "print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map,", "a list of tensors where the ith tensor has shape [batch, height_i, width_i,", "config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np = [] classes_predictions_with_background_np = [] feature_maps_np = []", "in key: print(str(i) + BASE_PPN_BoxPredictor, value.shape) boxes_encodings_np.append(value) break for key, value in result_middle.items():", "[-1, box_coder.code_size]), tiled_anchors_boxlist) decoded_keypoints = None if decoded_boxes.has_field(fields.BoxListFields.keypoints): decoded_keypoints = decoded_boxes.get_field( fields.BoxListFields.keypoints) num_keypoints", "_batch_decode(anchors, box_encodings): \"\"\"Decodes a batch of box encodings with respect to the anchors.", "break for key, value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\" not", "num_keypoints = decoded_keypoints.get_shape()[1] decoded_keypoints = np.reshape( decoded_keypoints, np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2])) decoded_boxes =", "= num_detections[0] elif isinstance(num_detections, float): num_detections = int(num_detections) elif isinstance(num_detections, np.ndarray): num_detections =", "= detection_boxes detection_boxes = np.expand_dims(detection_boxes, axis=2) non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions)", "/ padded_height, true_widths / padded_width], axis=1) cliped_imaged = cliped_image.reshape(1, -1) return cliped_imaged def", "BASE_PPN_ClassPredictor not in key: print(str(i) + BASE_ClassPredictor+ \": \", value.shape) classes_predictions_with_background_np.append(value) break if", 
"preprocessed_inputs=None, true_image_shapes=None): \"\"\" SSD model POST processer :param boxes_encodings: :param classes_predictions_with_background: :param feature_maps:", "= { 'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps, 'anchors': anchors.get() }", "Args: box_encodings: A float32 tensor of shape [batch_size, num_anchors, box_code_size] containing box encodings.", "matplotlib matplotlib.use('Agg') from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util from scipy import misc import os BOX_ENCODINGS", "boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4)) boxes = np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] =", "fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) } if (nmsed_additional_fields is not", "BASE_BoxEncodingPredictor = \"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor", "in key and BASE_PPN_ClassPredictor not in key: print(str(i) + BASE_ClassPredictor+ \": \", value.shape)", "= np.zeros(shape=raw_shape - num_detections) scores = np.hstack((scores_1, scores_2)) scores = np.reshape(scores, (1, scores.shape[0]))", "= None if detection_keypoints is not None: additional_fields = { fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes,", "preprocessed_inputs: a [batch, height, width, channels] image tensor. true_image_shapes: int32 tensor of shape", "channels_i] containing features for a batch of images. 
num_predictions_per_location_list: A list of integers", "num_detections = postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list): num_detections = num_detections[0] elif isinstance(num_detections, float): num_detections", "scores.shape[0])) outputs[detection_fields.detection_scores] = scores classes = classes.flatten() classes_1 = classes[0:num_detections] print(\"classes_1:\", classes_1) classes_2", "in result_middle.items(): if str(i) + BASE_BoxEncodingPredictor in key: print(str(i) + BASE_BoxEncodingPredictor + \":", "if i == 0: if PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break else:", "------------------\") detection_fields = fields.DetectionResultFields label_id_offset = 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores)", "reverse=True) for key, value in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1],", "label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list):", "true images in the resized images, as resized images can be padded with", "x: x[1], reverse=True) for key, value in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape true_image_shapes", "class predictions for the proposals. 
Each entry in the list corresponds to a", "( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_images) true_heights, true_widths,", "= keypoints if masks is not None: outputs[detection_fields.detection_masks] = masks return outputs def", "key and BASE_PPN_ClassPredictor not in key: print(str(i) + BASE_ClassPredictor+ \": \", value.shape) classes_predictions_with_background_np.append(value)", "+ \":\", value.shape) classes_predictions_with_background_np.append(value) break for key, value in result_middle.items(): if \"FeatureExtractor\" in", "prediction_dict['class_predictions_with_background'], axis=1) predictions_dict = { 'preprocessed_inputs': preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps,", "= box_encodings class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings) detection_boxes = detection_boxes", "= np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, 1, box_code_size]) box_encodings = np.reshape(box_encodings, new_shape)", "true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields = fields.DetectionResultFields label_id_offset =", "class_predictions_list} def postprocess(anchors, prediction_dict, true_image_shapes): print(\"------------------ postprocess ------------------\") if ('box_encodings' not in prediction_dict", "(nmsed_additional_fields is not None and fields.BoxListFields.keypoints in nmsed_additional_fields): 
detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return", "last_predict_part ------------------\") \"\"\"Predicts unpostprocessed tensors from input tensor. This function takes an input", "where each row is of the form [height, width, channels] indicating the shapes", "def _add_output_tensor_nodes(postprocessed_tensors): print(\"------------------ _add_output_tensor_nodes ------------------\") detection_fields = fields.DetectionResultFields label_id_offset = 1 boxes =", "print(\"boxes_1:\", boxes_1) boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4)) boxes = np.hstack((boxes_1, boxes_2))", "= post_processor(boxes_encodings, classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape = shape_utils.combined_static_and_dynamic_shape( preprocessed_inputs) feature_map_spatial_dims = get_feature_map_spatial_dims( feature_maps)", "scores = np.reshape(scores, (1, scores.shape[0])) outputs[detection_fields.detection_scores] = scores classes = classes.flatten() classes_1 =", "last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs) postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes) return _add_output_tensor_nodes(postprocessed_tensors) def _add_output_tensor_nodes(postprocessed_tensors):", "return [(shape[1], shape[2]) for shape in feature_map_shapes] def post_processor(boxes_encodings, classes_predictions_with_background, image_features, num_predictions_per_location_list): print(\"------------------", "np.zeros(shape=(1, raw_shape - num_detections, 4)) boxes = np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections]", "= shape_utils.combined_static_and_dynamic_shape( box_encodings) batch_size = combined_shape[0] tiled_anchor_boxes = np.tile( np.expand_dims(anchors.get(), 0), [batch_size, 1,", "+ \":\", 
value.shape) classes_predictions_with_background_np.append(value) break else: if str(i) + BASE_ClassPredictor in key and", "class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1]", "float(num_detections) } if (nmsed_additional_fields is not None and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] =", "break else: if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key: print(str(i)", "detection_dict = { fields.DetectionResultFields.detection_boxes: nmsed_boxes, fields.DetectionResultFields.detection_scores: nmsed_scores, fields.DetectionResultFields.detection_classes: nmsed_classes, fields.DetectionResultFields.num_detections: float(num_detections) } if", "np.stack( [np.zeros_like(true_heights), np.zeros_like(true_widths), true_heights / padded_height, true_widths / padded_width], axis=1) cliped_imaged = cliped_image.reshape(1,", "= scores.shape[0] scores_1 = scores[0:num_detections] print(\"scores_1:\", scores_1) scores_2 = np.zeros(shape=raw_shape - num_detections) scores", "`image_features` list. \"\"\" box_encodings_list = [] class_predictions_list = [] for (image_feature, num_predictions_per_location, box_encodings,", "or 'class_predictions_with_background' not in prediction_dict): raise ValueError('prediction_dict does not contain expected entries.') preprocessed_images", "0: if PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\", value.shape) boxes_encodings_np.append(value) break else: if str(i) +", "fields.BoxListFields.keypoints: detection_keypoints} (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields, num_detections) = non_max_suppression_fn( detection_boxes, detection_scores, clip_window=_compute_clip_window(", "width_i, depth_i]. 
Returns: a list of pairs (height, width) for each feature map", "of calling the predict method is that self._anchors is populated with a box_list.BoxList", "calling the predict method is that self._anchors is populated with a box_list.BoxList of", "input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i,", "np from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \\ anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \\ visualization_utils", ":param preprocessed_inputs: :param true_image_shapes: :return: \"\"\" prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs)", "standard_fields as fields from platformx.plat_tensorflow.tools.processor import model_config import config from PIL import Image", "= [] feature_maps_np = [] for i in range(6): for key, value in", "a batch of images. 
num_predictions_per_location_list: A list of integers representing the number of", "class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND:", "num_predictions_per_location_list = anchor_generator.num_anchors_per_location() # print(\"num_predictions_per_location_list:\", num_predictions_per_location_list) prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background, feature_maps, num_predictions_per_location_list) image_shape", "= postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints =", "must be constructed before the postprocess or loss functions can be called. 
Args:", "non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:, 0:, 1:] additional_fields", "config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) result['detection_classes'] = result[ 'detection_classes'][0].astype(np.uint8)", "num_detections, 4)) boxes = np.hstack((boxes_1, boxes_2)) outputs[detection_fields.detection_boxes] = boxes outputs[detection_fields.num_detections] = num_detections if", "置 1 操作原始代码 if scores.shape[0] < 100: raw_shape = 100 else: raw_shape =", "= np.split(true_image_shapes, 3, axis=1) padded_height = float(resized_inputs_shape[1]) padded_width = float(resized_inputs_shape[2]) cliped_image = np.stack(", "\"\"\"Computes encoded object locations and corresponding confidences. 
Args: image_features: A list of float", "{} for key, value in result_middle.items(): if \"FeatureExtractor\" in key and \"fpn\"in key:", "label_map = label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)", "0: if PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0 + \":\", value.shape) classes_predictions_with_background_np.append(value) break else: if", "if PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0 + \":\", value.shape) classes_predictions_with_background_np.append(value) break else: if str(i)", "masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) if isinstance(num_detections, list): num_detections = num_detections[0] elif", "/ padded_width], axis=1) cliped_imaged = cliped_image.reshape(1, -1) return cliped_imaged def _batch_decode(anchors, box_encodings): \"\"\"Decodes", "BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS def run_ssd_tf_post(preprocessed_inputs, result_middle=None): boxes_encodings_np = [] classes_predictions_with_background_np", "is not None and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict", "= box_list.BoxList( np.reshape(tiled_anchor_boxes, [-1, 4])) box_coder = box_coder_builder.build(\"faster_rcnn_box_coder\") decoded_boxes = box_coder.decode( np.reshape(box_encodings, [-1,", "for a batch of images. 
num_predictions_per_location_list: A list of integers representing the number", "feature_map) for feature_map in feature_maps ] return [(shape[1], shape[2]) for shape in feature_map_shapes]", "np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim == 4 and box_encodings.shape[2] == 1: box_encodings = np.squeeze(box_encodings,", "import standard_fields as fields from platformx.plat_tensorflow.tools.processor import model_config import config from PIL import", "value in sorted_key_dict: feature_maps_np.append(result_middle[key]) input_shape = preprocessed_inputs.shape true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32)", "height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location_list: A list", "if \"FeatureExtractor\" in key and \"fpn\"in key: key_dict[key] = value.shape[1] sorted_key_dict = sorted(key_dict.items(),", "file_list = os.listdir(img_dir) IMG_PATH = os.path.join(img_dir, file_list[0]) print(\"IMG_PATH:\", IMG_PATH) image = Image.open(IMG_PATH) image_np", "break if i == 0: if PPN_ClassPredictor_0 in key: print(PPN_ClassPredictor_0 + \":\", value.shape)", "outputs[detection_fields.detection_keypoints] = keypoints if masks is not None: outputs[detection_fields.detection_masks] = masks return outputs", "show_detection_result(result): print(\"PATH_TO_LABELS:\", PATH_TO_LABELS) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) # NUM_CLASSES NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES categories =", "= np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS:", "true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the", "key: print(str(i) + BASE_BoxEncodingPredictor + \": \", value.shape) 
boxes_encodings_np.append(value) break if i ==", "a box_list.BoxList of anchors. These anchors must be constructed before the postprocess or", "images and runs it through the forward pass of the network to yield", "feature_map_spatial_dims, im_height=image_shape[1], im_width=image_shape[2]) anchors = box_list_ops.concatenate(anchors_list) box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1) if box_encodings.ndim ==", "preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings = box_encodings class_predictions = prediction_dict['class_predictions_with_background'] detection_boxes,", "misc.imsave('detection_result_ssd.png', image_np) def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8)", "+ 1 class_predictions_with_background = np.reshape( class_predictions_with_background, np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots]))", "score_conversion_fn = post_processing_builder.build(model_config.SSD) detection_scores_with_background = score_conversion_fn(class_predictions) detection_scores = detection_scores_with_background[0:, 0:, 1:] additional_fields =", "key_dict[key] = value.shape[1] sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True) for key, value", "\"_BoxEncodingPredictor\" BASE_ClassPredictor = \"_ClassPredictor\" PPN_BoxPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\"", "\", value.shape) boxes_encodings_np.append(value) break if i == 0: if PPN_BoxPredictor_0 in key: print(\"PPN_BoxPredictor_0:\",", "num_predictions_per_location_list: A list of integers representing the number of box predictions to be", 
"print(\"------------------ last_predict_part ------------------\") \"\"\"Predicts unpostprocessed tensors from input tensor. This function takes an", "\"WeightSharedConvolutionalBoxPredictor_BoxPredictor\" PPN_ClassPredictor_0 = \"WeightSharedConvolutionalBoxPredictor_ClassPredictor\" BASE_PPN_BoxPredictor = \"_BoxPredictor\" BASE_PPN_ClassPredictor = \"WeightSharedConvolutionalBoxPredictor\" PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS", "each row is of the form [height, width, channels] indicating the shapes of", "padded_width], axis=1) cliped_imaged = cliped_image.reshape(1, -1) return cliped_imaged def _batch_decode(anchors, box_encodings): \"\"\"Decodes a", "nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = ( nmsed_additional_fields[fields.BoxListFields.keypoints]) return detection_dict def _compute_clip_window(preprocessed_images, true_image_shapes): resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape(", "map in the input `image_features` list. \"\"\" box_encodings_list = [] class_predictions_list = []", "not contain expected entries.') preprocessed_images = prediction_dict['preprocessed_inputs'] box_encodings = prediction_dict['box_encodings'] box_encodings = box_encodings", "image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing", "np.stack([combined_feature_map_shape[0], combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location, num_class_slots])) class_predictions_list.append(class_predictions_with_background) return {BOX_ENCODINGS: box_encodings_list, CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list}", "`image_features` list. 
class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes", "preprocessed_inputs, 'box_encodings': box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'feature_maps': feature_maps, 'anchors': anchors.get() } return predictions_dict, anchors", "box_list, box_coder_builder, post_processing_builder, \\ visualization_utils as vis_util from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields", "} if (nmsed_additional_fields is not None and fields.BoxListFields.keypoints in nmsed_additional_fields): detection_dict[fields.DetectionResultFields.detection_keypoints] = (" ]
[ "[0.1, .1, .1, .7], [0.2, 0.2, 0.5, 0.1]] first_time = 1 num_simulations =", "- start_time) sys.stdout.write('\\r%.2f%% Completed, ' % (float(count) / float(total_size) * 100.0) + '\\tElapsed", "= [ [0.25, .25, .25, .25], [0.1, 0.4, 0.1, 0.4], [0.2, 0.3, 0.2,", "* sum_flow for x in flow_ratios[i][:]] for scheme_number in xrange(2): _progress(sim_number, num_simulations) phase_times", "= multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start() p.join(60) if p.is_alive(): print \"Vissim is Not", "0.5, 0.1]] first_time = 1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to run %d", "[ [0.25, .25, .25, .25], [0.1, 0.4, 0.1, 0.4], [0.2, 0.3, 0.2, 0.3],", "sim_number>last_checkpoint: if close_vis is True: restart_vissim = True else: restart_vissim = False if", "sum_flow for x in flow_ratios[i][:]] for scheme_number in xrange(2): _progress(sim_number, num_simulations) phase_times =", "_progress(sim_number, num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if close_vis is True: restart_vissim =", "close_vis is True: restart_vissim = True else: restart_vissim = False if sim_number%100 ==", "[fair_scheme,relative_scheme] n_phase = 4 c_times = list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds sum_flows =", "in sum_flows: for lost_time in lost_times: for ct in c_times: for i in", "for lost_time in lost_times: for ct in c_times: for i in xrange(len(flow_ratios)): ratios", "import time import datetime import multiprocessing import win32com.client as com import os def", "'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time = 0 sim_number +=1 sys.stdout.write('\\rAll simulations Completed Sucessfully!') def", "is Not Responding...\" print \"Terminating run #{}\".format(sim_number) p.terminate() 
continue #p.join() else: first_time=0 done", "= 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis = False restart_vissim = False for sum_flow", ".25, .25, .25], [0.1, 0.4, 0.1, 0.4], [0.2, 0.3, 0.2, 0.3], [0.1, 0.1,", "' % (float(count) / float(total_size) * 100.0) + '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if", "restart_vissim = False if sim_number%100 == 0: close_vis = True else: close_vis =", "c_times: for i in xrange(len(flow_ratios)): ratios = [x * sum_flow for x in", "import generate_phase_times, run_simulation, printProgressBar import sys import time import datetime import multiprocessing import", "0 last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis = False restart_vissim = False", "for scheme_number in xrange(2): _progress(sim_number, num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if close_vis", "sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS = [35,...,50,51,....,99,100]% of Saturation Flow Rate sum_flows =", "sys.stdout.write('\\r%.2f%% Completed, ' % (float(count) / float(total_size) * 100.0) + '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time))))", "in sum_flows] lost_times = list(xrange(4,32,4)) # LOST_TIMES = [4,8,12,16,20] seconds flow_ratios = [", "Completed, ' % (float(count) / float(total_size) * 100.0) + '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush()", "Not Responding...\" print \"Terminating run #{}\".format(sim_number) p.terminate() continue #p.join() else: first_time=0 done =", "TOTAL_FLOWS = [35,...,50,51,....,99,100]% of Saturation Flow Rate sum_flows = [float(x)/100 for x in", 
"list(xrange(35,101,5)) # TOTAL_FLOWS = [35,...,50,51,....,99,100]% of Saturation Flow Rate sum_flows = [float(x)/100 for", "lost_times = list(xrange(4,32,4)) # LOST_TIMES = [4,8,12,16,20] seconds flow_ratios = [ [0.25, .25,", "is True: restart_vissim = True else: restart_vissim = False if sim_number%100 == 0:", "for i in xrange(len(flow_ratios)): ratios = [x * sum_flow for x in flow_ratios[i][:]]", "win32com.client as com import os def main(): global start_time start_time = time.time() fair_scheme", "0.1]] first_time = 1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to run %d simulations\"", "if sim_number>last_checkpoint: if close_vis is True: restart_vissim = True else: restart_vissim = False", "(float(count) / float(total_size) * 100.0) + '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__ ==", "float(total_size) * 100.0) + '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__ == \"__main__\": main()", ".25], [0.1, 0.4, 0.1, 0.4], [0.2, 0.3, 0.2, 0.3], [0.1, 0.1, 0.4, 0.4],", ".7], [0.2, 0.2, 0.5, 0.1]] first_time = 1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting", "first_time = 1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to run %d simulations\" %(num_simulations)", "{}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__ == \"__main__\": main() ''' while not done: p =", "print \"Vissim is Not Responding...\" print \"Terminating run #{}\".format(sim_number) p.terminate() continue #p.join() else:", "in xrange(2): _progress(sim_number, num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if close_vis is True:", "True: restart_vissim = True else: 
restart_vissim = False if sim_number%100 == 0: close_vis", "start_time start_time = time.time() fair_scheme = 'fair' relative_scheme = 'relative' schemes = [fair_scheme,relative_scheme]", "0.4], [0.1, .1, .1, .7], [0.2, 0.2, 0.5, 0.1]] first_time = 1 num_simulations", "while not done: p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start() p.join(60) if p.is_alive():", "first_time = 0 sim_number +=1 sys.stdout.write('\\rAll simulations Completed Sucessfully!') def _progress(count, total_size): global", "[float(x)/100 for x in sum_flows] lost_times = list(xrange(4,32,4)) # LOST_TIMES = [4,8,12,16,20] seconds", "= list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS = [35,...,50,51,....,99,100]%", "import multiprocessing import win32com.client as com import os def main(): global start_time start_time", "0.2, 0.3], [0.1, 0.1, 0.4, 0.4], [0.1, .1, .1, .7], [0.2, 0.2, 0.5,", "= [50,55,...,150] seconds sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS = [35,...,50,51,....,99,100]% of Saturation Flow", "0.1, 0.4, 0.4], [0.1, .1, .1, .7], [0.2, 0.2, 0.5, 0.1]] first_time =", "restart_vissim = False for sum_flow in sum_flows: for lost_time in lost_times: for ct", "False for sum_flow in sum_flows: for lost_time in lost_times: for ct in c_times:", "p.join(60) if p.is_alive(): print \"Vissim is Not Responding...\" print \"Terminating run #{}\".format(sim_number) p.terminate()", "restart_vissim = True else: restart_vissim = False if sim_number%100 == 0: close_vis =", "* 100.0) + '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__ == \"__main__\": main() '''", "flow_ratios = [ [0.25, .25, .25, .25], [0.1, 0.4, 0.1, 0.4], [0.2, 0.3,", "for sum_flow in sum_flows: for lost_time in lost_times: for ct in c_times: for", "= time.time() fair_scheme = 'fair' relative_scheme = 'relative' schemes = 
[fair_scheme,relative_scheme] n_phase =", "sim_number%100 == 0: close_vis = True else: close_vis = False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis,", "in xrange(len(flow_ratios)): ratios = [x * sum_flow for x in flow_ratios[i][:]] for scheme_number", "= 'relative' schemes = [fair_scheme,relative_scheme] n_phase = 4 c_times = list(xrange(50,155,5))#CYCLE TIMES =", "i in xrange(len(flow_ratios)): ratios = [x * sum_flow for x in flow_ratios[i][:]] for", "if p.is_alive(): print \"Vissim is Not Responding...\" print \"Terminating run #{}\".format(sim_number) p.terminate() continue", "for ct in c_times: for i in xrange(len(flow_ratios)): ratios = [x * sum_flow", "LOST_TIMES = [4,8,12,16,20] seconds flow_ratios = [ [0.25, .25, .25, .25], [0.1, 0.4,", "relative_scheme = 'relative' schemes = [fair_scheme,relative_scheme] n_phase = 4 c_times = list(xrange(50,155,5))#CYCLE TIMES", "generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if close_vis is True: restart_vissim = True else: restart_vissim =", "import sys import time import datetime import multiprocessing import win32com.client as com import", "vissim_utils.sim import generate_phase_times, run_simulation, printProgressBar import sys import time import datetime import multiprocessing", ".1, .1, .7], [0.2, 0.2, 0.5, 0.1]] first_time = 1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2", "of Saturation Flow Rate sum_flows = [float(x)/100 for x in sum_flows] lost_times =", "simulations\" %(num_simulations) sim_number = 0 last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis =", "global start_time start_time = time.time() fair_scheme = 'fair' relative_scheme = 'relative' schemes =", "p = multiprocessing.Process(target=run_simulation, 
args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start() p.join(60) if p.is_alive(): print \"Vissim is", "def main(): global start_time start_time = time.time() fair_scheme = 'fair' relative_scheme = 'relative'", "'relative' schemes = [fair_scheme,relative_scheme] n_phase = 4 c_times = list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150]", "generate_phase_times, run_simulation, printProgressBar import sys import time import datetime import multiprocessing import win32com.client", "= [x * sum_flow for x in flow_ratios[i][:]] for scheme_number in xrange(2): _progress(sim_number,", "def _progress(count, total_size): global start_time elapsed_time = int(time.time() - start_time) sys.stdout.write('\\r%.2f%% Completed, '", "in lost_times: for ct in c_times: for i in xrange(len(flow_ratios)): ratios = [x", "= int(time.time() - start_time) sys.stdout.write('\\r%.2f%% Completed, ' % (float(count) / float(total_size) * 100.0)", "print \"Starting to run %d simulations\" %(num_simulations) sim_number = 0 last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\", "Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__ == \"__main__\": main() ''' while not done: p", "run_simulation, printProgressBar import sys import time import datetime import multiprocessing import win32com.client as", "last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis = False restart_vissim = False for", "os def main(): global start_time start_time = time.time() fair_scheme = 'fair' relative_scheme =", "[0.2, 0.2, 0.5, 0.1]] first_time = 1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to", "1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis = False 
restart_vissim = False for sum_flow in", "= 'fair' relative_scheme = 'relative' schemes = [fair_scheme,relative_scheme] n_phase = 4 c_times =", "0.4, 0.1, 0.4], [0.2, 0.3, 0.2, 0.3], [0.1, 0.1, 0.4, 0.4], [0.1, .1,", "time import datetime import multiprocessing import win32com.client as com import os def main():", "True else: restart_vissim = False if sim_number%100 == 0: close_vis = True else:", "for x in sum_flows] lost_times = list(xrange(4,32,4)) # LOST_TIMES = [4,8,12,16,20] seconds flow_ratios", "start_time elapsed_time = int(time.time() - start_time) sys.stdout.write('\\r%.2f%% Completed, ' % (float(count) / float(total_size)", "print \"Terminating run #{}\".format(sim_number) p.terminate() continue #p.join() else: first_time=0 done = True '''", "c_times = list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS =", "[35,...,50,51,....,99,100]% of Saturation Flow Rate sum_flows = [float(x)/100 for x in sum_flows] lost_times", "False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time = 0 sim_number +=1 sys.stdout.write('\\rAll simulations Completed", "seconds sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS = [35,...,50,51,....,99,100]% of Saturation Flow Rate sum_flows", "if __name__ == \"__main__\": main() ''' while not done: p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times,", "[0.1, 0.1, 0.4, 0.4], [0.1, .1, .1, .7], [0.2, 0.2, 0.5, 0.1]] first_time", "0.2, 0.5, 0.1]] first_time = 1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to run", "= False if sim_number%100 == 0: close_vis = True else: close_vis = False", "lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start() p.join(60) if p.is_alive(): print \"Vissim is Not Responding...\" print \"Terminating", "as com import os def main(): global start_time start_time = time.time() 
fair_scheme =", "[0.25, .25, .25, .25], [0.1, 0.4, 0.1, 0.4], [0.2, 0.3, 0.2, 0.3], [0.1,", "0 sim_number +=1 sys.stdout.write('\\rAll simulations Completed Sucessfully!') def _progress(count, total_size): global start_time elapsed_time", "% (float(count) / float(total_size) * 100.0) + '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__", "+=1 sys.stdout.write('\\rAll simulations Completed Sucessfully!') def _progress(count, total_size): global start_time elapsed_time = int(time.time()", "done: p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start() p.join(60) if p.is_alive(): print \"Vissim", ".25, .25], [0.1, 0.4, 0.1, 0.4], [0.2, 0.3, 0.2, 0.3], [0.1, 0.1, 0.4,", "args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start() p.join(60) if p.is_alive(): print \"Vissim is Not Responding...\" print", "sim_number = 0 last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis = False restart_vissim", "else: restart_vissim = False if sim_number%100 == 0: close_vis = True else: close_vis", "= generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if close_vis is True: restart_vissim = True else: restart_vissim", "global start_time elapsed_time = int(time.time() - start_time) sys.stdout.write('\\r%.2f%% Completed, ' % (float(count) /", "+ '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__ == \"__main__\": main() ''' while not", "= 4 c_times = list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds sum_flows = list(xrange(35,101,5)) #", "''' while not done: p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time, 
'results_sep08_2.csv')) p.start() p.join(60) if", "list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS = [35,...,50,51,....,99,100]% of", "100.0) + '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__ == \"__main__\": main() ''' while", "'\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__ == \"__main__\": main() ''' while not done:", "Responding...\" print \"Terminating run #{}\".format(sim_number) p.terminate() continue #p.join() else: first_time=0 done = True", "main(): global start_time start_time = time.time() fair_scheme = 'fair' relative_scheme = 'relative' schemes", "close_vissim=close_vis, reset_vissim=restart_vissim) first_time = 0 sim_number +=1 sys.stdout.write('\\rAll simulations Completed Sucessfully!') def _progress(count,", "sum_flow in sum_flows: for lost_time in lost_times: for ct in c_times: for i", "0.4, 0.4], [0.1, .1, .1, .7], [0.2, 0.2, 0.5, 0.1]] first_time = 1", "lost_time in lost_times: for ct in c_times: for i in xrange(len(flow_ratios)): ratios =", "sys.stdout.write('\\rAll simulations Completed Sucessfully!') def _progress(count, total_size): global start_time elapsed_time = int(time.time() -", "= [4,8,12,16,20] seconds flow_ratios = [ [0.25, .25, .25, .25], [0.1, 0.4, 0.1,", "ratios = [x * sum_flow for x in flow_ratios[i][:]] for scheme_number in xrange(2):", "# TOTAL_FLOWS = [35,...,50,51,....,99,100]% of Saturation Flow Rate sum_flows = [float(x)/100 for x", "sum_flows = [float(x)/100 for x in sum_flows] lost_times = list(xrange(4,32,4)) # LOST_TIMES =", "Sucessfully!') def _progress(count, total_size): global start_time elapsed_time = int(time.time() - start_time) sys.stdout.write('\\r%.2f%% Completed,", "import win32com.client as com import os def main(): global start_time start_time = time.time()", "= 0 last_checkpoint = 
1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis = False restart_vissim =", "__name__ == \"__main__\": main() ''' while not done: p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time,", "close_vis = True else: close_vis = False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time =", "= list(xrange(35,101,5)) # TOTAL_FLOWS = [35,...,50,51,....,99,100]% of Saturation Flow Rate sum_flows = [float(x)/100", "TIMES = [50,55,...,150] seconds sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS = [35,...,50,51,....,99,100]% of Saturation", "= [float(x)/100 for x in sum_flows] lost_times = list(xrange(4,32,4)) # LOST_TIMES = [4,8,12,16,20]", "# LOST_TIMES = [4,8,12,16,20] seconds flow_ratios = [ [0.25, .25, .25, .25], [0.1,", "99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis = False restart_vissim = False for sum_flow in sum_flows:", "for x in flow_ratios[i][:]] for scheme_number in xrange(2): _progress(sim_number, num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number])", "== \"__main__\": main() ''' while not done: p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv'))", "multiprocessing import win32com.client as com import os def main(): global start_time start_time =", "= list(xrange(4,32,4)) # LOST_TIMES = [4,8,12,16,20] seconds flow_ratios = [ [0.25, .25, .25,", "\"__main__\": main() ''' while not done: p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start()", "sys.stdout.flush() if __name__ == \"__main__\": main() ''' while not done: p = 
multiprocessing.Process(target=run_simulation,", "start_time) sys.stdout.write('\\r%.2f%% Completed, ' % (float(count) / float(total_size) * 100.0) + '\\tElapsed Time:", "== 0: close_vis = True else: close_vis = False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim)", "Rate sum_flows = [float(x)/100 for x in sum_flows] lost_times = list(xrange(4,32,4)) # LOST_TIMES", "0: close_vis = True else: close_vis = False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time", "in flow_ratios[i][:]] for scheme_number in xrange(2): _progress(sim_number, num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint:", "main() ''' while not done: p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start() p.join(60)", "n_phase = 4 c_times = list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds sum_flows = list(xrange(35,101,5))", "%d simulations\" %(num_simulations) sim_number = 0 last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis", "simulations Completed Sucessfully!') def _progress(count, total_size): global start_time elapsed_time = int(time.time() - start_time)", "[0.2, 0.3, 0.2, 0.3], [0.1, 0.1, 0.4, 0.4], [0.1, .1, .1, .7], [0.2,", "sim_number +=1 sys.stdout.write('\\rAll simulations Completed Sucessfully!') def _progress(count, total_size): global start_time elapsed_time =", "= False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time = 0 sim_number +=1 sys.stdout.write('\\rAll simulations", "x in 
flow_ratios[i][:]] for scheme_number in xrange(2): _progress(sim_number, num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if", "xrange(2): _progress(sim_number, num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if close_vis is True: restart_vissim", "= 0 sim_number +=1 sys.stdout.write('\\rAll simulations Completed Sucessfully!') def _progress(count, total_size): global start_time", "Completed Sucessfully!') def _progress(count, total_size): global start_time elapsed_time = int(time.time() - start_time) sys.stdout.write('\\r%.2f%%", "4 c_times = list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS", "Vissim close_vis = False restart_vissim = False for sum_flow in sum_flows: for lost_time", "fair_scheme = 'fair' relative_scheme = 'relative' schemes = [fair_scheme,relative_scheme] n_phase = 4 c_times", "global Vissim close_vis = False restart_vissim = False for sum_flow in sum_flows: for", "num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if close_vis is True: restart_vissim = True", "list(xrange(4,32,4)) # LOST_TIMES = [4,8,12,16,20] seconds flow_ratios = [ [0.25, .25, .25, .25],", "run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time = 0 sim_number +=1 sys.stdout.write('\\rAll simulations Completed Sucessfully!')", "1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to run %d simulations\" %(num_simulations) sim_number =", "com import os def main(): global start_time start_time = time.time() fair_scheme = 'fair'", "elapsed_time = int(time.time() - start_time) 
sys.stdout.write('\\r%.2f%% Completed, ' % (float(count) / float(total_size) *", "\"Starting to run %d simulations\" %(num_simulations) sim_number = 0 last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329", "= False for sum_flow in sum_flows: for lost_time in lost_times: for ct in", "Flow Rate sum_flows = [float(x)/100 for x in sum_flows] lost_times = list(xrange(4,32,4)) #", "0.1, 0.4], [0.2, 0.3, 0.2, 0.3], [0.1, 0.1, 0.4, 0.4], [0.1, .1, .1,", "close_vis = False restart_vissim = False for sum_flow in sum_flows: for lost_time in", "= len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to run %d simulations\" %(num_simulations) sim_number = 0 last_checkpoint", "start_time = time.time() fair_scheme = 'fair' relative_scheme = 'relative' schemes = [fair_scheme,relative_scheme] n_phase", "if sim_number%100 == 0: close_vis = True else: close_vis = False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv',", "\"Vissim is Not Responding...\" print \"Terminating run #{}\".format(sim_number) p.terminate() continue #p.join() else: first_time=0", "else: close_vis = False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time = 0 sim_number +=1", "x in sum_flows] lost_times = list(xrange(4,32,4)) # LOST_TIMES = [4,8,12,16,20] seconds flow_ratios =", "0.3, 0.2, 0.3], [0.1, 0.1, 0.4, 0.4], [0.1, .1, .1, .7], [0.2, 0.2,", "len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to run %d simulations\" %(num_simulations) sim_number = 0 last_checkpoint =", "= True else: restart_vissim = False if sim_number%100 == 0: close_vis = True", "if close_vis is True: restart_vissim = True else: restart_vissim = False if sim_number%100", "total_size): global start_time elapsed_time = int(time.time() - start_time) 
sys.stdout.write('\\r%.2f%% Completed, ' % (float(count)", "p.start() p.join(60) if p.is_alive(): print \"Vissim is Not Responding...\" print \"Terminating run #{}\".format(sim_number)", "_progress(count, total_size): global start_time elapsed_time = int(time.time() - start_time) sys.stdout.write('\\r%.2f%% Completed, ' %", "time.time() fair_scheme = 'fair' relative_scheme = 'relative' schemes = [fair_scheme,relative_scheme] n_phase = 4", "in c_times: for i in xrange(len(flow_ratios)): ratios = [x * sum_flow for x", "False if sim_number%100 == 0: close_vis = True else: close_vis = False run_simulation(ct,phase_times,lost_time,ratios,first_time,", "lost_times: for ct in c_times: for i in xrange(len(flow_ratios)): ratios = [x *", "= True else: close_vis = False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time = 0", "sys import time import datetime import multiprocessing import win32com.client as com import os", "= 1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to run %d simulations\" %(num_simulations) sim_number", "sum_flows] lost_times = list(xrange(4,32,4)) # LOST_TIMES = [4,8,12,16,20] seconds flow_ratios = [ [0.25,", "run %d simulations\" %(num_simulations) sim_number = 0 last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim", "seconds flow_ratios = [ [0.25, .25, .25, .25], [0.1, 0.4, 0.1, 0.4], [0.2,", "[0.1, 0.4, 0.1, 0.4], [0.2, 0.3, 0.2, 0.3], [0.1, 0.1, 0.4, 0.4], [0.1,", "/ float(total_size) * 100.0) + '\\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time)))) sys.stdout.flush() if __name__ == \"__main__\":", "reset_vissim=restart_vissim) first_time = 0 sim_number +=1 sys.stdout.write('\\rAll simulations Completed Sucessfully!') def _progress(count, total_size):", "'fair' relative_scheme = 
'relative' schemes = [fair_scheme,relative_scheme] n_phase = 4 c_times = list(xrange(50,155,5))#CYCLE", "printProgressBar import sys import time import datetime import multiprocessing import win32com.client as com", "from vissim_utils.sim import generate_phase_times, run_simulation, printProgressBar import sys import time import datetime import", "0.4], [0.2, 0.3, 0.2, 0.3], [0.1, 0.1, 0.4, 0.4], [0.1, .1, .1, .7],", "False restart_vissim = False for sum_flow in sum_flows: for lost_time in lost_times: for", "sum_flows: for lost_time in lost_times: for ct in c_times: for i in xrange(len(flow_ratios)):", "num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print \"Starting to run %d simulations\" %(num_simulations) sim_number = 0", "import datetime import multiprocessing import win32com.client as com import os def main(): global", "flow_ratios[i][:]] for scheme_number in xrange(2): _progress(sim_number, num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if", "multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start() p.join(60) if p.is_alive(): print \"Vissim is Not Responding...\"", "Saturation Flow Rate sum_flows = [float(x)/100 for x in sum_flows] lost_times = list(xrange(4,32,4))", "= [fair_scheme,relative_scheme] n_phase = 4 c_times = list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds sum_flows", "import os def main(): global start_time start_time = time.time() fair_scheme = 'fair' relative_scheme", "to run %d simulations\" %(num_simulations) sim_number = 0 last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global", "[x * sum_flow for x in flow_ratios[i][:]] for scheme_number in xrange(2): _progress(sim_number, num_simulations)", "close_vis = False 
run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time = 0 sim_number +=1 sys.stdout.write('\\rAll", "= [35,...,50,51,....,99,100]% of Saturation Flow Rate sum_flows = [float(x)/100 for x in sum_flows]", "phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if close_vis is True: restart_vissim = True else:", "scheme_number in xrange(2): _progress(sim_number, num_simulations) phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number]) if sim_number>last_checkpoint: if close_vis is", "p.is_alive(): print \"Vissim is Not Responding...\" print \"Terminating run #{}\".format(sim_number) p.terminate() continue #p.join()", "schemes = [fair_scheme,relative_scheme] n_phase = 4 c_times = list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds", "ct in c_times: for i in xrange(len(flow_ratios)): ratios = [x * sum_flow for", "datetime import multiprocessing import win32com.client as com import os def main(): global start_time", "[4,8,12,16,20] seconds flow_ratios = [ [0.25, .25, .25, .25], [0.1, 0.4, 0.1, 0.4],", "= False restart_vissim = False for sum_flow in sum_flows: for lost_time in lost_times:", "xrange(len(flow_ratios)): ratios = [x * sum_flow for x in flow_ratios[i][:]] for scheme_number in", "True else: close_vis = False run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv', close_vissim=close_vis, reset_vissim=restart_vissim) first_time = 0 sim_number", "int(time.time() - start_time) sys.stdout.write('\\r%.2f%% Completed, ' % (float(count) / float(total_size) * 100.0) +", "%(num_simulations) sim_number = 0 last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\\ 99+131+1457+508+183+603+583+1479+503+1890+407+90+1329 global Vissim close_vis = False", ".1, .7], [0.2, 0.2, 
0.5, 0.1]] first_time = 1 num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2 print", "0.3], [0.1, 0.1, 0.4, 0.4], [0.1, .1, .1, .7], [0.2, 0.2, 0.5, 0.1]]", "[50,55,...,150] seconds sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS = [35,...,50,51,....,99,100]% of Saturation Flow Rate", "'results_sep08_2.csv')) p.start() p.join(60) if p.is_alive(): print \"Vissim is Not Responding...\" print \"Terminating run", "not done: p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times, lost_time,ratios,first_time, 'results_sep08_2.csv')) p.start() p.join(60) if p.is_alive(): print" ]
[ "0: D[i,j] = 510 else: D[i,j] = 1 for k in range(n): for", "for k in range(edges.shape[0]): i,j = edges[k,0], edges[k,1] A[i,j] = 1 E[i,j] =", "import numba as nb from .graph_dataset import GraphDataset NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET =", "edge_feats = item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges, node_feats, edge_feats) item[self.node_features_key] =", "import numpy as np import numba as nb from .graph_dataset import GraphDataset NODE_FEATURES_OFFSET", "= item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges, node_feats, edge_feats)", "k in range(edges.shape[0]): i,j = edges[k,0], edges[k,1] A[i,j] = 1 E[i,j] = edge_feats[k]", "feature_matrix_key def __getitem__(self, index): item = super().__getitem__(index) num_nodes = int(item[self.num_nodes_key]) edges = item.pop(self.edges_key)", "node_feats, edge_feats) item[self.node_features_key] = node_feats item[self.distance_matrix_key] = dist_mat item[self.feature_matrix_key] = edge_feats_mat return item", "A[i,j] = 1 E[i,j] = edge_feats[k] D = floyd_warshall(A) return node_feats, D, E", "D[i,j] new_dist = D[i,k] + D[k,j] if new_dist < old_dist: D[i,j] = new_dist", "A[i,j] == 0: D[i,j] = 510 else: D[i,j] = 1 for k in", "distance_matrix_key self.feature_matrix_key = feature_matrix_key def __getitem__(self, index): item = super().__getitem__(index) num_nodes = int(item[self.num_nodes_key])", "node_feats = item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges, node_feats,", "= floyd_warshall(A) return node_feats, D, E class StructuralDataset(GraphDataset): def __init__(self, distance_matrix_key = 'distance_matrix',", "i == j: pass elif A[i,j] == 0: D[i,j] = 510 else: D[i,j]", "= D[i,k] + D[k,j] if new_dist < old_dist: D[i,j] = new_dist return D", "= 
np.zeros((n,n), dtype=np.int16) for i in range(n): for j in range(n): if i", "1 E[i,j] = edge_feats[k] D = floyd_warshall(A) return node_feats, D, E class StructuralDataset(GraphDataset):", "= item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges, node_feats, edge_feats) item[self.node_features_key] = node_feats", "A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in range(edges.shape[0]): i,j = edges[k,0],", "pass elif A[i,j] == 0: D[i,j] = 510 else: D[i,j] = 1 for", "D = floyd_warshall(A) return node_feats, D, E class StructuralDataset(GraphDataset): def __init__(self, distance_matrix_key =", "edge_feats_mat = preprocess_data(num_nodes, edges, node_feats, edge_feats) item[self.node_features_key] = node_feats item[self.distance_matrix_key] = dist_mat item[self.feature_matrix_key]", "node_feats, D, E class StructuralDataset(GraphDataset): def __init__(self, distance_matrix_key = 'distance_matrix', feature_matrix_key = 'feature_matrix',", "j: pass elif A[i,j] == 0: D[i,j] = 510 else: D[i,j] = 1", "'distance_matrix', feature_matrix_key = 'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key self.feature_matrix_key = feature_matrix_key def", "<gh_stars>1-10 import numpy as np import numba as nb from .graph_dataset import GraphDataset", "@nb.njit def preprocess_data(num_nodes, edges, node_feats, edge_feats): node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats", "feature_matrix_key = 'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key self.feature_matrix_key = feature_matrix_key def __getitem__(self,", "= 8 @nb.njit def floyd_warshall(A): n = A.shape[0] D = np.zeros((n,n), dtype=np.int16) for", "= new_dist return D @nb.njit def preprocess_data(num_nodes, edges, 
node_feats, edge_feats): node_feats = node_feats", "int(item[self.num_nodes_key]) edges = item.pop(self.edges_key) node_feats = item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat", "old_dist = D[i,j] new_dist = D[i,k] + D[k,j] if new_dist < old_dist: D[i,j]", "super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key self.feature_matrix_key = feature_matrix_key def __getitem__(self, index): item = super().__getitem__(index)", "128 EDGE_FEATURES_OFFSET = 8 @nb.njit def floyd_warshall(A): n = A.shape[0] D = np.zeros((n,n),", "range(n): for i in range(n): for j in range(n): old_dist = D[i,j] new_dist", "= node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A =", "@nb.njit def floyd_warshall(A): n = A.shape[0] D = np.zeros((n,n), dtype=np.int16) for i in", "__getitem__(self, index): item = super().__getitem__(index) num_nodes = int(item[self.num_nodes_key]) edges = item.pop(self.edges_key) node_feats =", "nb from .graph_dataset import GraphDataset NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET = 8 @nb.njit def", "D[k,j] if new_dist < old_dist: D[i,j] = new_dist return D @nb.njit def preprocess_data(num_nodes,", "= preprocess_data(num_nodes, edges, node_feats, edge_feats) item[self.node_features_key] = node_feats item[self.distance_matrix_key] = dist_mat item[self.feature_matrix_key] =", "+ D[k,j] if new_dist < old_dist: D[i,j] = new_dist return D @nb.njit def", "new_dist = D[i,k] + D[k,j] if new_dist < old_dist: D[i,j] = new_dist return", "import GraphDataset NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET = 8 @nb.njit def floyd_warshall(A): n =", "= 1 E[i,j] = edge_feats[k] D = floyd_warshall(A) return node_feats, D, E class", "D[i,j] = 510 else: D[i,j] = 1 for k in range(n): for i", "= feature_matrix_key def 
__getitem__(self, index): item = super().__getitem__(index) num_nodes = int(item[self.num_nodes_key]) edges =", "= int(item[self.num_nodes_key]) edges = item.pop(self.edges_key) node_feats = item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key) node_feats, dist_mat,", "edge_feats = edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for", "super().__getitem__(index) num_nodes = int(item[self.num_nodes_key]) edges = item.pop(self.edges_key) node_feats = item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key)", "= A.shape[0] D = np.zeros((n,n), dtype=np.int16) for i in range(n): for j in", "item = super().__getitem__(index) num_nodes = int(item[self.num_nodes_key]) edges = item.pop(self.edges_key) node_feats = item.pop(self.node_features_key) edge_feats", "distance_matrix_key = 'distance_matrix', feature_matrix_key = 'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key self.feature_matrix_key =", "range(n): if i == j: pass elif A[i,j] == 0: D[i,j] = 510", "if i == j: pass elif A[i,j] == 0: D[i,j] = 510 else:", "new_dist return D @nb.njit def preprocess_data(num_nodes, edges, node_feats, edge_feats): node_feats = node_feats +", "in range(edges.shape[0]): i,j = edges[k,0], edges[k,1] A[i,j] = 1 E[i,j] = edge_feats[k] D", "edges = item.pop(self.edges_key) node_feats = item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat =", "j in range(n): if i == j: pass elif A[i,j] == 0: D[i,j]", "edge_feats): node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16)", 
"D[i,j] = new_dist return D @nb.njit def preprocess_data(num_nodes, edges, node_feats, edge_feats): node_feats =", "StructuralDataset(GraphDataset): def __init__(self, distance_matrix_key = 'distance_matrix', feature_matrix_key = 'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key =", "preprocess_data(num_nodes, edges, node_feats, edge_feats) item[self.node_features_key] = node_feats item[self.distance_matrix_key] = dist_mat item[self.feature_matrix_key] = edge_feats_mat", "i,j = edges[k,0], edges[k,1] A[i,j] = 1 E[i,j] = edge_feats[k] D = floyd_warshall(A)", "GraphDataset NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET = 8 @nb.njit def floyd_warshall(A): n = A.shape[0]", "np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E =", "range(edges.shape[0]): i,j = edges[k,0], edges[k,1] A[i,j] = 1 E[i,j] = edge_feats[k] D =", "i in range(n): for j in range(n): if i == j: pass elif", "EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in range(edges.shape[0]): i,j =", "510 else: D[i,j] = 1 for k in range(n): for i in range(n):", "i in range(n): for j in range(n): old_dist = D[i,j] new_dist = D[i,k]", "edges, node_feats, edge_feats) item[self.node_features_key] = node_feats item[self.distance_matrix_key] = dist_mat item[self.feature_matrix_key] = edge_feats_mat return", "num_nodes = int(item[self.num_nodes_key]) edges = item.pop(self.edges_key) node_feats = item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key) node_feats,", "self.distance_matrix_key = distance_matrix_key self.feature_matrix_key = feature_matrix_key def __getitem__(self, index): item = super().__getitem__(index) num_nodes", "in 
range(n): for j in range(n): if i == j: pass elif A[i,j]", "= edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k", "in range(n): old_dist = D[i,j] new_dist = D[i,k] + D[k,j] if new_dist <", "= distance_matrix_key self.feature_matrix_key = feature_matrix_key def __getitem__(self, index): item = super().__getitem__(index) num_nodes =", "for j in range(n): if i == j: pass elif A[i,j] == 0:", "for j in range(n): old_dist = D[i,j] new_dist = D[i,k] + D[k,j] if", "EDGE_FEATURES_OFFSET = 8 @nb.njit def floyd_warshall(A): n = A.shape[0] D = np.zeros((n,n), dtype=np.int16)", "np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in range(edges.shape[0]): i,j = edges[k,0], edges[k,1] A[i,j] = 1 E[i,j]", "edges[k,1] A[i,j] = 1 E[i,j] = edge_feats[k] D = floyd_warshall(A) return node_feats, D,", "= super().__getitem__(index) num_nodes = int(item[self.num_nodes_key]) edges = item.pop(self.edges_key) node_feats = item.pop(self.node_features_key) edge_feats =", "= 'distance_matrix', feature_matrix_key = 'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key self.feature_matrix_key = feature_matrix_key", "= item.pop(self.edges_key) node_feats = item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes,", "return node_feats, D, E class StructuralDataset(GraphDataset): def __init__(self, distance_matrix_key = 'distance_matrix', feature_matrix_key =", "D[i,j] = 1 for k in range(n): for i in range(n): for j", "= edges[k,0], edges[k,1] A[i,j] = 1 E[i,j] = edge_feats[k] D = floyd_warshall(A) return", "for i in range(n): for j in range(n): old_dist = D[i,j] new_dist =", "for i in range(n): for j in range(n): if i == j: pass", "def __getitem__(self, 
index): item = super().__getitem__(index) num_nodes = int(item[self.num_nodes_key]) edges = item.pop(self.edges_key) node_feats", "NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16)", "< old_dist: D[i,j] = new_dist return D @nb.njit def preprocess_data(num_nodes, edges, node_feats, edge_feats):", "class StructuralDataset(GraphDataset): def __init__(self, distance_matrix_key = 'distance_matrix', feature_matrix_key = 'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key", "in range(n): for j in range(n): old_dist = D[i,j] new_dist = D[i,k] +", "item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges, node_feats, edge_feats) item[self.node_features_key] = node_feats item[self.distance_matrix_key]", "D = np.zeros((n,n), dtype=np.int16) for i in range(n): for j in range(n): if", "range(n): old_dist = D[i,j] new_dist = D[i,k] + D[k,j] if new_dist < old_dist:", "A.shape[0] D = np.zeros((n,n), dtype=np.int16) for i in range(n): for j in range(n):", "def __init__(self, distance_matrix_key = 'distance_matrix', feature_matrix_key = 'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key", "n = A.shape[0] D = np.zeros((n,n), dtype=np.int16) for i in range(n): for j", "np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in range(edges.shape[0]): i,j", "j in range(n): old_dist = D[i,j] new_dist = D[i,k] + D[k,j] if new_dist", "index): item = super().__getitem__(index) num_nodes = int(item[self.num_nodes_key]) edges = item.pop(self.edges_key) node_feats = 
item.pop(self.node_features_key)", "numpy as np import numba as nb from .graph_dataset import GraphDataset NODE_FEATURES_OFFSET =", "dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges, node_feats, edge_feats) item[self.node_features_key] = node_feats item[self.distance_matrix_key] = dist_mat", "item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges, node_feats, edge_feats) item[self.node_features_key]", "E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in range(edges.shape[0]): i,j = edges[k,0], edges[k,1] A[i,j] =", "in range(n): for i in range(n): for j in range(n): old_dist = D[i,j]", "np.zeros((n,n), dtype=np.int16) for i in range(n): for j in range(n): if i ==", "new_dist < old_dist: D[i,j] = new_dist return D @nb.njit def preprocess_data(num_nodes, edges, node_feats,", "= 1 for k in range(n): for i in range(n): for j in", ".graph_dataset import GraphDataset NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET = 8 @nb.njit def floyd_warshall(A): n", "8 @nb.njit def floyd_warshall(A): n = A.shape[0] D = np.zeros((n,n), dtype=np.int16) for i", "edge_feats[k] D = floyd_warshall(A) return node_feats, D, E class StructuralDataset(GraphDataset): def __init__(self, distance_matrix_key", "NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET = 8 @nb.njit def floyd_warshall(A): n = A.shape[0] D", "preprocess_data(num_nodes, edges, node_feats, edge_feats): node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats", "**kwargs): super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key self.feature_matrix_key = feature_matrix_key def __getitem__(self, index): item =", "= np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in range(edges.shape[0]): i,j = edges[k,0], edges[k,1] A[i,j] = 1", "E class StructuralDataset(GraphDataset): 
def __init__(self, distance_matrix_key = 'distance_matrix', feature_matrix_key = 'feature_matrix', **kwargs): super().__init__(**kwargs)", "'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key self.feature_matrix_key = feature_matrix_key def __getitem__(self, index): item", "D @nb.njit def preprocess_data(num_nodes, edges, node_feats, edge_feats): node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16)", "E[i,j] = edge_feats[k] D = floyd_warshall(A) return node_feats, D, E class StructuralDataset(GraphDataset): def", "elif A[i,j] == 0: D[i,j] = 510 else: D[i,j] = 1 for k", "if new_dist < old_dist: D[i,j] = new_dist return D @nb.njit def preprocess_data(num_nodes, edges,", "def floyd_warshall(A): n = A.shape[0] D = np.zeros((n,n), dtype=np.int16) for i in range(n):", "node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16)", "= np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in range(edges.shape[0]): i,j = edges[k,0], edges[k,1]", "np import numba as nb from .graph_dataset import GraphDataset NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET", "= 'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key self.feature_matrix_key = feature_matrix_key def __getitem__(self, index):", "self.feature_matrix_key = feature_matrix_key def __getitem__(self, index): item = super().__getitem__(index) num_nodes = int(item[self.num_nodes_key]) edges", "np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in range(edges.shape[0]): i,j = edges[k,0], edges[k,1] 
A[i,j]", "return D @nb.njit def preprocess_data(num_nodes, edges, node_feats, edge_feats): node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1,", "edges, node_feats, edge_feats): node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats +", "dtype=np.int16) for i in range(n): for j in range(n): if i == j:", "__init__(self, distance_matrix_key = 'distance_matrix', feature_matrix_key = 'feature_matrix', **kwargs): super().__init__(**kwargs) self.distance_matrix_key = distance_matrix_key self.feature_matrix_key", "== 0: D[i,j] = 510 else: D[i,j] = 1 for k in range(n):", "as nb from .graph_dataset import GraphDataset NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET = 8 @nb.njit", "+ np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in range(edges.shape[0]):", "node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges, node_feats, edge_feats) item[self.node_features_key] = node_feats item[self.distance_matrix_key] =", "== j: pass elif A[i,j] == 0: D[i,j] = 510 else: D[i,j] =", "edges[k,0], edges[k,1] A[i,j] = 1 E[i,j] = edge_feats[k] D = floyd_warshall(A) return node_feats,", "D[i,k] + D[k,j] if new_dist < old_dist: D[i,j] = new_dist return D @nb.njit", "= 128 EDGE_FEATURES_OFFSET = 8 @nb.njit def floyd_warshall(A): n = A.shape[0] D =", "D, E class StructuralDataset(GraphDataset): def __init__(self, distance_matrix_key = 'distance_matrix', feature_matrix_key = 'feature_matrix', **kwargs):", "floyd_warshall(A): n = A.shape[0] D = np.zeros((n,n), dtype=np.int16) for i in range(n): for", "node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats + 
np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A", "item.pop(self.edges_key) node_feats = item.pop(self.node_features_key) edge_feats = item.pop(self.edge_features_key) node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges,", "k in range(n): for i in range(n): for j in range(n): old_dist =", "def preprocess_data(num_nodes, edges, node_feats, edge_feats): node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats =", "as np import numba as nb from .graph_dataset import GraphDataset NODE_FEATURES_OFFSET = 128", "edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16) for k in", "= D[i,j] new_dist = D[i,k] + D[k,j] if new_dist < old_dist: D[i,j] =", "in range(n): if i == j: pass elif A[i,j] == 0: D[i,j] =", "1 for k in range(n): for i in range(n): for j in range(n):", "old_dist: D[i,j] = new_dist return D @nb.njit def preprocess_data(num_nodes, edges, node_feats, edge_feats): node_feats", "node_feats, edge_feats): node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1,", "= edge_feats[k] D = floyd_warshall(A) return node_feats, D, E class StructuralDataset(GraphDataset): def __init__(self,", "+ np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1, NODE_FEATURES_OFFSET,dtype=np.int16) edge_feats = edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1, EDGE_FEATURES_OFFSET,dtype=np.int16) A = np.zeros((num_nodes,num_nodes),dtype=np.int16) E", "floyd_warshall(A) return node_feats, D, E class StructuralDataset(GraphDataset): def __init__(self, distance_matrix_key = 'distance_matrix', feature_matrix_key", 
"range(n): for j in range(n): if i == j: pass elif A[i,j] ==", "for k in range(n): for i in range(n): for j in range(n): old_dist", "else: D[i,j] = 1 for k in range(n): for i in range(n): for", "range(n): for j in range(n): old_dist = D[i,j] new_dist = D[i,k] + D[k,j]", "numba as nb from .graph_dataset import GraphDataset NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET = 8", "= 510 else: D[i,j] = 1 for k in range(n): for i in", "from .graph_dataset import GraphDataset NODE_FEATURES_OFFSET = 128 EDGE_FEATURES_OFFSET = 8 @nb.njit def floyd_warshall(A):" ]
[ "<gh_stars>100-1000 import os import sphinx os.chdir('../../../docs_sphinx') sphinx.main(['sphinx-build', '-b', 'doctest', '.', '../docs', '-D', 'exclude_patterns=reference'])" ]
[]
[ "whether in contract, strict # liability or tort (including negligence or otherwise) arising", "1) for k in np.unique(label): N_k = np.sum(k == label) mu_rk_block = np.zeros((0,", "= np.argsort(label) H = H[:,order] label = label[order] numerator, denominator = 0, 0", "or tort (including negligence or otherwise) arising in any way # out of", "np.sum(k == label) mu_rk_block = np.zeros((0, N_k)) for r in range(H.shape[0]): mu_r =", "H. Otherwise denominator will be wrong. References ---------- .. [1] <NAME>, <NAME>, Amari", "Tue Feb 11 12:29:35 2020 # Author: <NAME>, Purdue University # # #", "# # The original code came with the following disclaimer: # # This", "liability, whether in contract, strict # liability or tort (including negligence or otherwise)", "Real scalar value indicating fisher discriminant. Notes ----- This fisher discriminant is the", "possibility of # such damage. # import numpy as np def fisher_discriminant(H, label):", "N_k)) for r in range(H.shape[0]): mu_r = mu_r_all[r] mu_rk = 1/N_k * np.sum(H[r,", "In no event # shall Zhi Huang be liable for any direct, indirect,", "business interruption) however # caused and on any theory of liability, whether in", "= np.concatenate((mu_rkn, mu_rk_block), axis = 1) denominator = np.sum((H - mu_rkn)**2) E_D =", "r in range(H.shape[0]): mu_r = mu_r_all[r] mu_rk = 1/N_k * np.sum(H[r, k ==", "neurodynamics. 2012 Dec 1;6(6):525-35. ''' order = np.argsort(label) H = H[:,order] label =", "algorithm and its application to the extraction of subtle emotional differences in speech.", "any kind, including, but not limited to, the warranties # of merchantability and", "kind, including, but not limited to, the warranties # of merchantability and fitness", "of the possibility of # such damage. # import numpy as np def", "(including, but not limited # to, loss of use, data or profits, or", "Cognitive neurodynamics. 2012 Dec 1;6(6):525-35. 
''' order = np.argsort(label) H = H[:,order] label", "implied # warranties of any kind, including, but not limited to, the warranties", "to label and H. Otherwise denominator will be wrong. References ---------- .. [1]", "discriminant is the equation (3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further sorted in", "samples. label : Class indices. Returns ------- E_D : Real scalar value indicating", "in np.unique(label): N_k = np.sum(k == label) mu_rk_block = np.zeros((0, N_k)) for r", "# such damage. # import numpy as np def fisher_discriminant(H, label): ''' Parameters", "order to label and H. Otherwise denominator will be wrong. References ---------- ..", "label) mu_rk_block = np.zeros((0, N_k)) for r in range(H.shape[0]): mu_r = mu_r_all[r] mu_rk", "software, even if advised of the possibility of # such damage. # import", "= H[:,order] label = label[order] numerator, denominator = 0, 0 mu_rkn = np.zeros((H.shape[0],", "profits, or business interruption) however # caused and on any theory of liability,", "mu_rk_block = np.zeros((0, N_k)) for r in range(H.shape[0]): mu_r = mu_r_all[r] mu_rk =", "= np.zeros((H.shape[0], 0)) mu_r_all = 1/H.shape[1] * np.sum(H, axis = 1) for k", "way # out of the use of this software, even if advised of", "2012 Dec 1;6(6):525-35. ''' order = np.argsort(label) H = H[:,order] label = label[order]", "extraction of subtle emotional differences in speech. Cognitive neurodynamics. 2012 Dec 1;6(6):525-35. 
'''", "# # # The original code came with the following disclaimer: # #", "** 2 mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1) denominator = np.sum((H -", "any theory of liability, whether in contract, strict # liability or tort (including", "limited # to, loss of use, data or profits, or business interruption) however", "further sorted in ascending order, then apply its order to label and H.", "= np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0) numerator += N_k * (mu_rk", "is further sorted in ascending order, then apply its order to label and", "Created on Tue Feb 11 12:29:35 2020 # Author: <NAME>, Purdue University #", "== label]) mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0) numerator +=", "for k in np.unique(label): N_k = np.sum(k == label) mu_rk_block = np.zeros((0, N_k))", "the equation (3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further sorted in ascending order,", "emotional differences in speech. Cognitive neurodynamics. 2012 Dec 1;6(6):525-35. ''' order = np.argsort(label)", "any direct, indirect, incidental, # special, exemplary or consequential damages (including, but not", "np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0) numerator += N_k * (mu_rk -", "np.argsort(label) H = H[:,order] label = label[order] numerator, denominator = 0, 0 mu_rkn", "# # This software is provided \"as-is\". There are no expressed or implied", "mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0) numerator += N_k *", "its order to label and H. Otherwise denominator will be wrong. References ----------", "All rights reserved # Created on Tue Feb 11 12:29:35 2020 # Author:", "its application to the extraction of subtle emotional differences in speech. 
Cognitive neurodynamics.", "use of this software, even if advised of the possibility of # such", "out of the use of this software, even if advised of the possibility", "(3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further sorted in ascending order, then apply", "denominator = 0, 0 mu_rkn = np.zeros((H.shape[0], 0)) mu_r_all = 1/H.shape[1] * np.sum(H,", "damage. # import numpy as np def fisher_discriminant(H, label): ''' Parameters ---------- H", "fisher_discriminant(H, label): ''' Parameters ---------- H : Real-valued matrix with columns indicating samples.", "label is further sorted in ascending order, then apply its order to label", "0)) mu_r_all = 1/H.shape[1] * np.sum(H, axis = 1) for k in np.unique(label):", "Huang be liable for any direct, indirect, incidental, # special, exemplary or consequential", "References ---------- .. [1] <NAME>, <NAME>, Amari SI. A new discriminant NMF algorithm", "k == label]) mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0) numerator", "to, the warranties # of merchantability and fitness for a given application. In", "Author: <NAME>, Purdue University # # # The original code came with the", "scalar value indicating fisher discriminant. Notes ----- This fisher discriminant is the equation", "''' Parameters ---------- H : Real-valued matrix with columns indicating samples. label :", "label = label[order] numerator, denominator = 0, 0 mu_rkn = np.zeros((H.shape[0], 0)) mu_r_all", "of the use of this software, even if advised of the possibility of", "advised of the possibility of # such damage. # import numpy as np", "in ascending order, then apply its order to label and H. Otherwise denominator", "if advised of the possibility of # such damage. # import numpy as", "warranties of any kind, including, but not limited to, the warranties # of", "label and H. Otherwise denominator will be wrong. References ---------- .. 
[1] <NAME>,", "0, 0 mu_rkn = np.zeros((H.shape[0], 0)) mu_r_all = 1/H.shape[1] * np.sum(H, axis =", "fisher discriminant. Notes ----- This fisher discriminant is the equation (3 a,b) in", "rights reserved # Created on Tue Feb 11 12:29:35 2020 # Author: <NAME>,", ".. [1] <NAME>, <NAME>, Amari SI. A new discriminant NMF algorithm and its", "not limited to, the warranties # of merchantability and fitness for a given", "Zhi Huang be liable for any direct, indirect, incidental, # special, exemplary or", "special, exemplary or consequential damages (including, but not limited # to, loss of", "application to the extraction of subtle emotional differences in speech. Cognitive neurodynamics. 2012", "0) numerator += N_k * (mu_rk - mu_r) ** 2 mu_rkn = np.concatenate((mu_rkn,", "* (mu_rk - mu_r) ** 2 mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1)", "# to, loss of use, data or profits, or business interruption) however #", "including, but not limited to, the warranties # of merchantability and fitness for", "= np.zeros((0, N_k)) for r in range(H.shape[0]): mu_r = mu_r_all[r] mu_rk = 1/N_k", "------- E_D : Real scalar value indicating fisher discriminant. Notes ----- This fisher", "np.zeros((0, N_k)) for r in range(H.shape[0]): mu_r = mu_r_all[r] mu_rk = 1/N_k *", "---------- .. [1] <NAME>, <NAME>, Amari SI. A new discriminant NMF algorithm and", "# The original code came with the following disclaimer: # # This software", "and fitness for a given application. In no event # shall Zhi Huang", "consequential damages (including, but not limited # to, loss of use, data or", "mu_rkn = np.zeros((H.shape[0], 0)) mu_r_all = 1/H.shape[1] * np.sum(H, axis = 1) for", "= 1) for k in np.unique(label): N_k = np.sum(k == label) mu_rk_block =", "value indicating fisher discriminant. Notes ----- This fisher discriminant is the equation (3", "N_k).reshape(1,N_k)), axis = 0) numerator += N_k * (mu_rk - mu_r) ** 2", "Returns ------- E_D : Real scalar value indicating fisher discriminant. 
Notes ----- This", "Notes ----- This fisher discriminant is the equation (3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label", "and its application to the extraction of subtle emotional differences in speech. Cognitive", "application. In no event # shall Zhi Huang be liable for any direct,", "order = np.argsort(label) H = H[:,order] label = label[order] numerator, denominator = 0,", "= 1) denominator = np.sum((H - mu_rkn)**2) E_D = numerator / denominator return", "* np.sum(H, axis = 1) for k in np.unique(label): N_k = np.sum(k ==", "Amari SI. A new discriminant NMF algorithm and its application to the extraction", "in any way # out of the use of this software, even if", "event # shall Zhi Huang be liable for any direct, indirect, incidental, #", "software is provided \"as-is\". There are no expressed or implied # warranties of", "# liability or tort (including negligence or otherwise) arising in any way #", "the following disclaimer: # # This software is provided \"as-is\". There are no", "np def fisher_discriminant(H, label): ''' Parameters ---------- H : Real-valued matrix with columns", "label : Class indices. Returns ------- E_D : Real scalar value indicating fisher", "\"as-is\". There are no expressed or implied # warranties of any kind, including,", "NMF algorithm and its application to the extraction of subtle emotional differences in", "1/H.shape[1] * np.sum(H, axis = 1) for k in np.unique(label): N_k = np.sum(k", "no event # shall Zhi Huang be liable for any direct, indirect, incidental,", "indirect, incidental, # special, exemplary or consequential damages (including, but not limited #", "label): ''' Parameters ---------- H : Real-valued matrix with columns indicating samples. label", "the use of this software, even if advised of the possibility of #", "or implied # warranties of any kind, including, but not limited to, the", "E_D : Real scalar value indicating fisher discriminant. 
Notes ----- This fisher discriminant", "k in np.unique(label): N_k = np.sum(k == label) mu_rk_block = np.zeros((0, N_k)) for", "Otherwise denominator will be wrong. References ---------- .. [1] <NAME>, <NAME>, Amari SI.", "discriminant NMF algorithm and its application to the extraction of subtle emotional differences", "not limited # to, loss of use, data or profits, or business interruption)", "use, data or profits, or business interruption) however # caused and on any", "liable for any direct, indirect, incidental, # special, exemplary or consequential damages (including,", "otherwise) arising in any way # out of the use of this software,", "<NAME>, Amari SI. A new discriminant NMF algorithm and its application to the", "2020 # Author: <NAME>, Purdue University # # # The original code came", "np.unique(label): N_k = np.sum(k == label) mu_rk_block = np.zeros((0, N_k)) for r in", "however # caused and on any theory of liability, whether in contract, strict", "for a given application. In no event # shall Zhi Huang be liable", "as np def fisher_discriminant(H, label): ''' Parameters ---------- H : Real-valued matrix with", "numerator, denominator = 0, 0 mu_rkn = np.zeros((H.shape[0], 0)) mu_r_all = 1/H.shape[1] *", "on Tue Feb 11 12:29:35 2020 # Author: <NAME>, Purdue University # #", "but not limited to, the warranties # of merchantability and fitness for a", "of merchantability and fitness for a given application. In no event # shall", "Dec 1;6(6):525-35. ''' order = np.argsort(label) H = H[:,order] label = label[order] numerator,", "the possibility of # such damage. # import numpy as np def fisher_discriminant(H,", "* np.sum(H[r, k == label]) mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis =", "and H. Otherwise denominator will be wrong. References ---------- .. [1] <NAME>, <NAME>,", "<NAME>, <NAME>, Amari SI. A new discriminant NMF algorithm and its application to", "denominator will be wrong. References ---------- .. 
[1] <NAME>, <NAME>, Amari SI. A", "speech. Cognitive neurodynamics. 2012 Dec 1;6(6):525-35. ''' order = np.argsort(label) H = H[:,order]", "for any direct, indirect, incidental, # special, exemplary or consequential damages (including, but", "----- This fisher discriminant is the equation (3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is", "sorted in ascending order, then apply its order to label and H. Otherwise", "= 0, 0 mu_rkn = np.zeros((H.shape[0], 0)) mu_r_all = 1/H.shape[1] * np.sum(H, axis", "11 12:29:35 2020 # Author: <NAME>, Purdue University # # # The original", "# warranties of any kind, including, but not limited to, the warranties #", "mu_r_all[r] mu_rk = 1/N_k * np.sum(H[r, k == label]) mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk]", "of use, data or profits, or business interruption) however # caused and on", "tort (including negligence or otherwise) arising in any way # out of the", "differences in speech. Cognitive neurodynamics. 2012 Dec 1;6(6):525-35. ''' order = np.argsort(label) H", "ascending order, then apply its order to label and H. Otherwise denominator will", "1) denominator = np.sum((H - mu_rkn)**2) E_D = numerator / denominator return E_D", "direct, indirect, incidental, # special, exemplary or consequential damages (including, but not limited", "# special, exemplary or consequential damages (including, but not limited # to, loss", "disclaimer: # # This software is provided \"as-is\". There are no expressed or", "1;6(6):525-35. ''' order = np.argsort(label) H = H[:,order] label = label[order] numerator, denominator", "in contract, strict # liability or tort (including negligence or otherwise) arising in", "mu_rk_block), axis = 1) denominator = np.sum((H - mu_rkn)**2) E_D = numerator /", "contract, strict # liability or tort (including negligence or otherwise) arising in any", "of # such damage. 
# import numpy as np def fisher_discriminant(H, label): '''", "= 1/N_k * np.sum(H[r, k == label]) mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)),", "are no expressed or implied # warranties of any kind, including, but not", "mu_rk = 1/N_k * np.sum(H[r, k == label]) mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] *", "liability or tort (including negligence or otherwise) arising in any way # out", "in speech. Cognitive neurodynamics. 2012 Dec 1;6(6):525-35. ''' order = np.argsort(label) H =", "2020 <NAME>. All rights reserved # Created on Tue Feb 11 12:29:35 2020", "H = H[:,order] label = label[order] numerator, denominator = 0, 0 mu_rkn =", "arising in any way # out of the use of this software, even", "exemplary or consequential damages (including, but not limited # to, loss of use,", "Class indices. Returns ------- E_D : Real scalar value indicating fisher discriminant. Notes", "on any theory of liability, whether in contract, strict # liability or tort", "2 mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1) denominator = np.sum((H - mu_rkn)**2)", "a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further sorted in ascending order, then apply its", "N_k = np.sum(k == label) mu_rk_block = np.zeros((0, N_k)) for r in range(H.shape[0]):", "Real-valued matrix with columns indicating samples. label : Class indices. 
Returns ------- E_D", "damages (including, but not limited # to, loss of use, data or profits,", "np.concatenate((mu_rkn, mu_rk_block), axis = 1) denominator = np.sum((H - mu_rkn)**2) E_D = numerator", "of liability, whether in contract, strict # liability or tort (including negligence or", "0 mu_rkn = np.zeros((H.shape[0], 0)) mu_r_all = 1/H.shape[1] * np.sum(H, axis = 1)", "= np.sum(k == label) mu_rk_block = np.zeros((0, N_k)) for r in range(H.shape[0]): mu_r", "strict # liability or tort (including negligence or otherwise) arising in any way", "axis = 1) for k in np.unique(label): N_k = np.sum(k == label) mu_rk_block", "mu_r = mu_r_all[r] mu_rk = 1/N_k * np.sum(H[r, k == label]) mu_rk_block =", "shall Zhi Huang be liable for any direct, indirect, incidental, # special, exemplary", "will be wrong. References ---------- .. [1] <NAME>, <NAME>, Amari SI. A new", "---------- H : Real-valued matrix with columns indicating samples. label : Class indices.", "code came with the following disclaimer: # # This software is provided \"as-is\".", "matrix with columns indicating samples. label : Class indices. Returns ------- E_D :", "incidental, # special, exemplary or consequential damages (including, but not limited # to,", "# Copyright 2020 <NAME>. All rights reserved # Created on Tue Feb 11", "* N_k).reshape(1,N_k)), axis = 0) numerator += N_k * (mu_rk - mu_r) **", "or business interruption) however # caused and on any theory of liability, whether", "provided \"as-is\". There are no expressed or implied # warranties of any kind,", "loss of use, data or profits, or business interruption) however # caused and", "There are no expressed or implied # warranties of any kind, including, but", "and on any theory of liability, whether in contract, strict # liability or", "<NAME>, Purdue University # # # The original code came with the following", "H : Real-valued matrix with columns indicating samples. label : Class indices. 
Returns", "interruption) however # caused and on any theory of liability, whether in contract,", "to the extraction of subtle emotional differences in speech. Cognitive neurodynamics. 2012 Dec", "no expressed or implied # warranties of any kind, including, but not limited", "the warranties # of merchantability and fitness for a given application. In no", ": Real scalar value indicating fisher discriminant. Notes ----- This fisher discriminant is", "import numpy as np def fisher_discriminant(H, label): ''' Parameters ---------- H : Real-valued", "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further sorted in ascending order, then apply its order to", "is the equation (3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further sorted in ascending", "label]) mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0) numerator += N_k", "N_k * (mu_rk - mu_r) ** 2 mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis =", "This fisher discriminant is the equation (3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further", "# Created on Tue Feb 11 12:29:35 2020 # Author: <NAME>, Purdue University", "is provided \"as-is\". There are no expressed or implied # warranties of any", "expressed or implied # warranties of any kind, including, but not limited to,", "any way # out of the use of this software, even if advised", "# import numpy as np def fisher_discriminant(H, label): ''' Parameters ---------- H :", "H[:,order] label = label[order] numerator, denominator = 0, 0 mu_rkn = np.zeros((H.shape[0], 0))", ": Class indices. 
Returns ------- E_D : Real scalar value indicating fisher discriminant.", "(mu_rk - mu_r) ** 2 mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1) denominator", "mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1) denominator = np.sum((H - mu_rkn)**2) E_D", "be liable for any direct, indirect, incidental, # special, exemplary or consequential damages", "or otherwise) arising in any way # out of the use of this", "(including negligence or otherwise) arising in any way # out of the use", "discriminant. Notes ----- This fisher discriminant is the equation (3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075", "or profits, or business interruption) however # caused and on any theory of", "following disclaimer: # # This software is provided \"as-is\". There are no expressed", "fisher discriminant is the equation (3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further sorted", "+= N_k * (mu_rk - mu_r) ** 2 mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis", "# This software is provided \"as-is\". There are no expressed or implied #", "then apply its order to label and H. Otherwise denominator will be wrong.", "of any kind, including, but not limited to, the warranties # of merchantability", "# caused and on any theory of liability, whether in contract, strict #", "A new discriminant NMF algorithm and its application to the extraction of subtle", "with the following disclaimer: # # This software is provided \"as-is\". There are", "merchantability and fitness for a given application. In no event # shall Zhi", "of subtle emotional differences in speech. Cognitive neurodynamics. 2012 Dec 1;6(6):525-35. ''' order", "range(H.shape[0]): mu_r = mu_r_all[r] mu_rk = 1/N_k * np.sum(H[r, k == label]) mu_rk_block", "wrong. References ---------- .. [1] <NAME>, <NAME>, Amari SI. 
A new discriminant NMF", "def fisher_discriminant(H, label): ''' Parameters ---------- H : Real-valued matrix with columns indicating", "np.sum(H[r, k == label]) mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0)", "Feb 11 12:29:35 2020 # Author: <NAME>, Purdue University # # # The", "= mu_r_all[r] mu_rk = 1/N_k * np.sum(H[r, k == label]) mu_rk_block = np.concatenate((mu_rk_block,", "data or profits, or business interruption) however # caused and on any theory", "the extraction of subtle emotional differences in speech. Cognitive neurodynamics. 2012 Dec 1;6(6):525-35.", "subtle emotional differences in speech. Cognitive neurodynamics. 2012 Dec 1;6(6):525-35. ''' order =", "# Author: <NAME>, Purdue University # # # The original code came with", "np.sum(H, axis = 1) for k in np.unique(label): N_k = np.sum(k == label)", "warranties # of merchantability and fitness for a given application. In no event", "Parameters ---------- H : Real-valued matrix with columns indicating samples. label : Class", "indicating fisher discriminant. Notes ----- This fisher discriminant is the equation (3 a,b)", "in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further sorted in ascending order, then apply its order", "new discriminant NMF algorithm and its application to the extraction of subtle emotional", "given application. In no event # shall Zhi Huang be liable for any", "this software, even if advised of the possibility of # such damage. #", "Purdue University # # # The original code came with the following disclaimer:", "= 1/H.shape[1] * np.sum(H, axis = 1) for k in np.unique(label): N_k =", "= 0) numerator += N_k * (mu_rk - mu_r) ** 2 mu_rkn =", "a given application. In no event # shall Zhi Huang be liable for", "to, loss of use, data or profits, or business interruption) however # caused", "even if advised of the possibility of # such damage. # import numpy", "indicating samples. label : Class indices. 
Returns ------- E_D : Real scalar value", "Copyright 2020 <NAME>. All rights reserved # Created on Tue Feb 11 12:29:35", "indices. Returns ------- E_D : Real scalar value indicating fisher discriminant. Notes -----", "mu_r) ** 2 mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1) denominator = np.sum((H", "equation (3 a,b) in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 label is further sorted in ascending order, then", ": Real-valued matrix with columns indicating samples. label : Class indices. Returns -------", "The original code came with the following disclaimer: # # This software is", "This software is provided \"as-is\". There are no expressed or implied # warranties", "12:29:35 2020 # Author: <NAME>, Purdue University # # # The original code", "in range(H.shape[0]): mu_r = mu_r_all[r] mu_rk = 1/N_k * np.sum(H[r, k == label])", "mu_r_all = 1/H.shape[1] * np.sum(H, axis = 1) for k in np.unique(label): N_k", "theory of liability, whether in contract, strict # liability or tort (including negligence", "negligence or otherwise) arising in any way # out of the use of", "of this software, even if advised of the possibility of # such damage.", "but not limited # to, loss of use, data or profits, or business", "# of merchantability and fitness for a given application. In no event #", "numpy as np def fisher_discriminant(H, label): ''' Parameters ---------- H : Real-valued matrix", "such damage. # import numpy as np def fisher_discriminant(H, label): ''' Parameters ----------", "or consequential damages (including, but not limited # to, loss of use, data", "caused and on any theory of liability, whether in contract, strict # liability", "reserved # Created on Tue Feb 11 12:29:35 2020 # Author: <NAME>, Purdue", "= label[order] numerator, denominator = 0, 0 mu_rkn = np.zeros((H.shape[0], 0)) mu_r_all =", "University # # # The original code came with the following disclaimer: #", "[1] <NAME>, <NAME>, Amari SI. 
A new discriminant NMF algorithm and its application", "- mu_r) ** 2 mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1) denominator =", "np.zeros((H.shape[0], 0)) mu_r_all = 1/H.shape[1] * np.sum(H, axis = 1) for k in", "<NAME>. All rights reserved # Created on Tue Feb 11 12:29:35 2020 #", "fitness for a given application. In no event # shall Zhi Huang be", "1/N_k * np.sum(H[r, k == label]) mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis", "apply its order to label and H. Otherwise denominator will be wrong. References", "came with the following disclaimer: # # This software is provided \"as-is\". There", "# shall Zhi Huang be liable for any direct, indirect, incidental, # special,", "with columns indicating samples. label : Class indices. Returns ------- E_D : Real", "''' order = np.argsort(label) H = H[:,order] label = label[order] numerator, denominator =", "limited to, the warranties # of merchantability and fitness for a given application.", "columns indicating samples. label : Class indices. Returns ------- E_D : Real scalar", "for r in range(H.shape[0]): mu_r = mu_r_all[r] mu_rk = 1/N_k * np.sum(H[r, k", "np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0) numerator += N_k * (mu_rk - mu_r)", "original code came with the following disclaimer: # # This software is provided", "label[order] numerator, denominator = 0, 0 mu_rkn = np.zeros((H.shape[0], 0)) mu_r_all = 1/H.shape[1]", "axis = 1) denominator = np.sum((H - mu_rkn)**2) E_D = numerator / denominator", "# out of the use of this software, even if advised of the", "axis = 0) numerator += N_k * (mu_rk - mu_r) ** 2 mu_rkn", "numerator += N_k * (mu_rk - mu_r) ** 2 mu_rkn = np.concatenate((mu_rkn, mu_rk_block),", "be wrong. References ---------- .. [1] <NAME>, <NAME>, Amari SI. A new discriminant", "order, then apply its order to label and H. Otherwise denominator will be", "SI. 
A new discriminant NMF algorithm and its application to the extraction of", "== label) mu_rk_block = np.zeros((0, N_k)) for r in range(H.shape[0]): mu_r = mu_r_all[r]" ]
[ "for # incoming connections. monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY,", "remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) # Perform p2p/sub-proto", "# We need this to ensure the server can check if the peer", "None assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version assert receiver_peer.privkey == RECEIVER_PRIVKEY", "asyncio.Queue() async def start_peer(self, peer): self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node): return", "= list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto is not None assert initiator_peer.sub_proto.name ==", "Perform p2p/sub-proto handshake, completing the full handshake and causing a new peer to", "chaindb, headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class, ) return server @pytest.fixture async def server(): server", "asyncio.sleep(0.1) assert len(started_peers) == 1 assert len(pool.connected_nodes) == 1 # Stop our peer", "cancel_token import CancelToken from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER from eth.db.chain import ChainDB from", "MemoryDB() headerdb = FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db) server =", "= s.getsockname()[1] s.close() return port port = get_open_port() NETWORK_ID = 99 SERVER_ADDRESS =", "initiator_peer.sub_proto is not None assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version == 
receiver_peer.sub_proto.version assert", "= Server( privkey, address.tcp_port, chain, chaindb, headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class, ) return server", "mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) # Perform p2p/sub-proto handshake, completing the full handshake", "chain = RopstenChain(base_db) server = Server( privkey, address.tcp_port, chain, chaindb, headerdb, base_db, network_id=NETWORK_ID,", "started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) # We need this to ensure the server", "@pytest.fixture async def server(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield", "await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() # wait for peer to be processed await asyncio.wait_for(server.peer_pool.next_peer(),", "pytest import socket from eth_keys import keys from cancel_token import CancelToken from eth.chains.ropsten", "await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async def receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await", "from p2p.auth import HandshakeInitiator, _handshake from p2p.peer import ( PeerPool, ) from p2p.kademlia", "timeout=1) @pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch, server, event_loop): # We need this to ensure", "from tests.p2p.auth_constants import eip8_values from tests.trinity.core.dumb_peer import DumbPeer from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def", "server can check if the peer pool is full for # incoming connections.", "new peer to be # added to the server's pool. 
await initiator_peer.do_p2p_handshake() await", "PeerPool, ) from p2p.kademlia import ( Node, Address, ) from trinity.protocol.eth.peer import ETHPeer", "Address('127.0.0.1', get_open_port() + 1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool: is_full = False", "p2p.auth import HandshakeInitiator, _handshake from p2p.peer import ( PeerPool, ) from p2p.kademlia import", "timeout=1) assert len(server.peer_pool.connected_nodes) == 1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto", "can check if the peer pool is full for # incoming connections. monkeypatch.setattr(receiver_server_with_dumb_peer,", "tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1) port", "mock_start_peer(peer): nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) # We need this to ensure", "def is_valid_connection_candidate(self, node): return True def __len__(self): return len(self.connected_nodes) async def next_peer(self): return", "get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio", "peer_class=peer_class, ) return server @pytest.fixture async def server(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer)", "aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake( initiator, reader, writer, token) initiator_peer =", "p2p.kademlia import ( Node, Address, ) from trinity.protocol.eth.peer import ETHPeer from trinity.server import", "RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY = 
keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS =", "address.tcp_port, chain, chaindb, headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class, ) return server @pytest.fixture async def", "use_eip8, token) reader, writer = await initiator.connect() # Send auth init message to", "= Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool: is_full = False connected_nodes = {} def __init__(self):", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1) port = s.getsockname()[1] s.close() return port port", "check if the peer pool is full for # incoming connections. monkeypatch.setattr(server, 'peer_pool',", "INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool: is_full = False connected_nodes = {} def", "reader, writer, token) initiator_peer = ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac,", "isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto is not None assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version", "ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db) server = Server( privkey, address.tcp_port, chain, chaindb, headerdb,", "[RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) # Give the receiver_server a chance to ack the handshake.", "await asyncio.sleep(0.1) assert len(started_peers) == 1 assert len(pool.connected_nodes) == 1 # Stop our", "self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node): return True def __len__(self): return len(self.connected_nodes)", "chain, chaindb, headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class, ) return server @pytest.fixture async def server():", "= ChainDB(base_db) 
chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db) server = Server( privkey, address.tcp_port, chain, chaindb,", "writer, token) initiator_peer = ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac,", "the server's pool. await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() # wait for peer to be", "the handshake. await asyncio.sleep(0.1) assert len(started_peers) == 1 assert len(pool.connected_nodes) == 1 #", "eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER from eth.db.chain import ChainDB from eth.db.backends.memory import MemoryDB from", "monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes = [RECEIVER_REMOTE]", "'peer_pool', MockPeerPool()) use_eip8 = False token = CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8,", "asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async def receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(),", "= get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1)", "test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers = [] async def mock_start_peer(peer): nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer,", "to ensure the server can check if the peer pool is full for", "await initiator.connect() # Send auth init message to the server, then read and", "Server from 
tests.p2p.auth_constants import eip8_values from tests.trinity.core.dumb_peer import DumbPeer from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB", "We need this to ensure the server can check if the peer pool", "= await _handshake( initiator, reader, writer, token) initiator_peer = ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader,", "monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) # We need this to ensure the server can check", "a new peer to be # added to the server's pool. await initiator_peer.do_p2p_handshake()", "len(pool.connected_nodes) == 1 # Stop our peer to make sure its pending asyncio", "the receiver_server a chance to ack the handshake. await asyncio.sleep(0.1) assert len(started_peers) ==", "asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch, server,", "pool is full for # incoming connections. monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool = PeerPool(DumbPeer,", "import MemoryDB from p2p.auth import HandshakeInitiator, _handshake from p2p.peer import ( PeerPool, )", "__init__(self): self._new_peers = asyncio.Queue() async def start_peer(self, peer): self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer) def", "is_valid_connection_candidate(self, node): return True def __len__(self): return len(self.connected_nodes) async def next_peer(self): return await", "if the peer pool is full for # incoming connections. 
monkeypatch.setattr(server, 'peer_pool', MockPeerPool())", "mock_start_peer) # We need this to ensure the server can check if the", "tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY =", "import ChainDB from eth.db.backends.memory import MemoryDB from p2p.auth import HandshakeInitiator, _handshake from p2p.peer", "False token = CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token) reader, writer =", "processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes) == 1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer,", "def get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1) port = s.getsockname()[1] s.close()", "{} def __init__(self): self._new_peers = asyncio.Queue() async def start_peer(self, peer): self.connected_nodes[peer.remote] = peer", "= RopstenChain(base_db) server = Server( privkey, address.tcp_port, chain, chaindb, headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class,", "= {} def __init__(self): self._new_peers = asyncio.Queue() async def start_peer(self, peer): self.connected_nodes[peer.remote] =", "token) initiator_peer = ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb,", "INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS)", "import ( PeerPool, ) from p2p.kademlia import ( Node, Address, ) from trinity.protocol.eth.peer", "import CancelToken from eth.chains.ropsten import RopstenChain, 
ROPSTEN_GENESIS_HEADER from eth.db.chain import ChainDB from eth.db.backends.memory", "99 SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE", "receiver_server_with_dumb_peer): started_peers = [] async def mock_start_peer(peer): nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer)", "base_db, network_id=NETWORK_ID, peer_class=peer_class, ) return server @pytest.fixture async def server(): server = get_server(RECEIVER_PRIVKEY,", "a chance to ack the handshake. await asyncio.sleep(0.1) assert len(started_peers) == 1 assert", "async def start_peer(self, peer): self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node): return True", "== 1 assert len(pool.connected_nodes) == 1 # Stop our peer to make sure", "peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node): return True def __len__(self): return len(self.connected_nodes) async def", "to ack the handshake. await asyncio.sleep(0.1) assert len(started_peers) == 1 assert len(pool.connected_nodes) ==", "handshake. 
await asyncio.sleep(0.1) assert len(started_peers) == 1 assert len(pool.connected_nodes) == 1 # Stop", "timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async def receiver_server_with_dumb_peer(): server =", "eip8_values from tests.trinity.core.dumb_peer import DumbPeer from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def get_open_port(): s =", "= RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS", "event_loop): # We need this to ensure the server can check if the", "to the server's pool. await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() # wait for peer to", "return server @pytest.fixture async def server(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(),", "if the peer pool is full for # incoming connections. 
monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool())", "return len(self.connected_nodes) async def next_peer(self): return await self._new_peers.get() def get_server(privkey, address, peer_class): base_db", "Node, Address, ) from trinity.protocol.eth.peer import ETHPeer from trinity.server import Server from tests.p2p.auth_constants", "= peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node): return True def __len__(self): return len(self.connected_nodes) async", "return port port = get_open_port() NETWORK_ID = 99 SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port)", "DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async def", "0)) s.listen(1) port = s.getsockname()[1] s.close() return port port = get_open_port() NETWORK_ID =", "s.bind((\"\", 0)) s.listen(1) port = s.getsockname()[1] s.close() return port port = get_open_port() NETWORK_ID", "def server(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger()", "peer): self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node): return True def __len__(self): return", "import keys from cancel_token import CancelToken from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER from eth.db.chain", "HandshakeInitiator, _handshake from p2p.peer import ( PeerPool, ) from p2p.kademlia import ( Node,", "get_open_port() NETWORK_ID = 99 SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY", "import Server from tests.p2p.auth_constants import eip8_values from tests.trinity.core.dumb_peer import DumbPeer from 
tests.trinity.core.integration_test_helpers import", "FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes = [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) # Give the receiver_server", "peer pool is full for # incoming connections. monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool =", "await _handshake( initiator, reader, writer, token) initiator_peer = ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer,", "from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1)", "def __init__(self): self._new_peers = asyncio.Queue() async def start_peer(self, peer): self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer)", "and causing a new peer to be # added to the server's pool.", "import ETHPeer from trinity.server import Server from tests.p2p.auth_constants import eip8_values from tests.trinity.core.dumb_peer import", "to the server, then read and decode auth ack aes_secret, mac_secret, egress_mac, ingress_mac", "server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch, server, event_loop): # We need", "async def test_server_incoming_connection(monkeypatch, server, event_loop): # We need this to ensure the server", "assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version assert receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio async def test_peer_pool_connect(monkeypatch, event_loop,", "assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version assert receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio", "return True def __len__(self): return len(self.connected_nodes) async def next_peer(self): 
return await self._new_peers.get() def", "writer = await initiator.connect() # Send auth init message to the server, then", "assert len(started_peers) == 1 assert len(pool.connected_nodes) == 1 # Stop our peer to", "server = Server( privkey, address.tcp_port, chain, chaindb, headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class, ) return", "server @pytest.fixture async def server(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1)", "await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes) == 1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer)", "assert initiator_peer.sub_proto is not None assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version", "headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class, ) return server @pytest.fixture async def server(): server =", "= keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1) INITIATOR_REMOTE =", "started_peers = [] async def mock_start_peer(peer): nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) #", "to be processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes) == 1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0]", "async def server(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server", "assert isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto is not None assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert", "full handshake and 
causing a new peer to be # added to the", "initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version assert receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio async", "causing a new peer to be # added to the server's pool. await", "tests.trinity.core.dumb_peer import DumbPeer from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "server's pool. await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() # wait for peer to be processed", "auth init message to the server, then read and decode auth ack aes_secret,", "RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key'])", "is full for # incoming connections. 
monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8 = False token", "handshake and causing a new peer to be # added to the server's", "and decode auth ack aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake( initiator, reader,", "len(self.connected_nodes) async def next_peer(self): return await self._new_peers.get() def get_server(privkey, address, peer_class): base_db =", "Server( privkey, address.tcp_port, chain, chaindb, headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class, ) return server @pytest.fixture", "def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers = [] async def mock_start_peer(peer): nonlocal started_peers started_peers.append(peer)", "= PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes = [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) # Give", "s.close() return port port = get_open_port() NETWORK_ID = 99 SERVER_ADDRESS = Address('127.0.0.1', udp_port=port,", "peer_class): base_db = MemoryDB() headerdb = FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain =", "privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) # Perform p2p/sub-proto handshake,", "RopstenChain(base_db) server = Server( privkey, address.tcp_port, chain, chaindb, headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class, )", "connections. 
monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8 = False token = CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE,", "MockPeerPool: is_full = False connected_nodes = {} def __init__(self): self._new_peers = asyncio.Queue() async", "Address, ) from trinity.protocol.eth.peer import ETHPeer from trinity.server import Server from tests.p2p.auth_constants import", "server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch, server, event_loop): # We", "token = CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token) reader, writer = await", "is_full = False connected_nodes = {} def __init__(self): self._new_peers = asyncio.Queue() async def", "yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async def receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY,", "== 1 # Stop our peer to make sure its pending asyncio tasks", "import asyncio import pytest import socket from eth_keys import keys from cancel_token import", "DumbPeer from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0))", "receiver_peer.sub_proto.version assert receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio async def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers =", "1 # Stop our peer to make sure its pending asyncio tasks are", "await pool.connect_to_nodes(nodes) # Give the receiver_server a chance to ack the handshake. 
await", "ETHPeer from trinity.server import Server from tests.p2p.auth_constants import eip8_values from tests.trinity.core.dumb_peer import DumbPeer", "= 99 SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key", "INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1) INITIATOR_REMOTE", "await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async def receiver_server_with_dumb_peer():", "from eth.db.backends.memory import MemoryDB from p2p.auth import HandshakeInitiator, _handshake from p2p.peer import (", "class MockPeerPool: is_full = False connected_nodes = {} def __init__(self): self._new_peers = asyncio.Queue()", "pool.connect_to_nodes(nodes) # Give the receiver_server a chance to ack the handshake. await asyncio.sleep(0.1)", "receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version assert receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio async def test_peer_pool_connect(monkeypatch,", "@pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch, server, event_loop): # We need this to ensure the", "server, event_loop): # We need this to ensure the server can check if", "server, then read and decode auth ack aes_secret, mac_secret, egress_mac, ingress_mac = await", "# incoming connections. monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8 = False token = CancelToken(\"<PASSWORD>\") initiator", "check if the peer pool is full for # incoming connections. 
monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool',", "'_start_peer', mock_start_peer) # We need this to ensure the server can check if", "socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1) port = s.getsockname()[1] s.close() return port port = get_open_port()", "1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto is not None assert", "SERVER_ADDRESS) INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1)", "= await initiator.connect() # Send auth init message to the server, then read", "udp_port=port, tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY", "import HandshakeInitiator, _handshake from p2p.peer import ( PeerPool, ) from p2p.kademlia import (", "Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool: is_full = False connected_nodes = {} def __init__(self): self._new_peers", "socket from eth_keys import keys from cancel_token import CancelToken from eth.chains.ropsten import RopstenChain,", "= FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db) server = Server( privkey,", "__len__(self): return len(self.connected_nodes) async def next_peer(self): return await self._new_peers.get() def get_server(privkey, address, peer_class):", "start_peer(self, peer): self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node): return True def __len__(self):", "Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = 
Address('127.0.0.1', get_open_port() +", "= get_open_port() NETWORK_ID = 99 SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key'])", "# Stop our peer to make sure its pending asyncio tasks are cancelled.", "trinity.server import Server from tests.p2p.auth_constants import eip8_values from tests.trinity.core.dumb_peer import DumbPeer from tests.trinity.core.integration_test_helpers", "s.getsockname()[1] s.close() return port port = get_open_port() NETWORK_ID = 99 SERVER_ADDRESS = Address('127.0.0.1',", "message to the server, then read and decode auth ack aes_secret, mac_secret, egress_mac,", "def get_server(privkey, address, peer_class): base_db = MemoryDB() headerdb = FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db)", "ChainDB from eth.db.backends.memory import MemoryDB from p2p.auth import HandshakeInitiator, _handshake from p2p.peer import", "connected_nodes = {} def __init__(self): self._new_peers = asyncio.Queue() async def start_peer(self, peer): self.connected_nodes[peer.remote]", "async def receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server", "# Send auth init message to the server, then read and decode auth", "be # added to the server's pool. 
await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() # wait", "completing the full handshake and causing a new peer to be # added", "asyncio import pytest import socket from eth_keys import keys from cancel_token import CancelToken", "( PeerPool, ) from p2p.kademlia import ( Node, Address, ) from trinity.protocol.eth.peer import", "= False token = CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token) reader, writer", "await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch, server, event_loop): # We need this", "MemoryDB from p2p.auth import HandshakeInitiator, _handshake from p2p.peer import ( PeerPool, ) from", "init message to the server, then read and decode auth ack aes_secret, mac_secret,", "Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS)", "the server can check if the peer pool is full for # incoming", "import pytest import socket from eth_keys import keys from cancel_token import CancelToken from", "headerdb=server.headerdb, network_id=NETWORK_ID) # Perform p2p/sub-proto handshake, completing the full handshake and causing a", "p2p/sub-proto handshake, completing the full handshake and causing a new peer to be", "initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() # wait for peer to be processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1)", "pool is full for # incoming connections. 
monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8 = False", "test_server_incoming_connection(monkeypatch, server, event_loop): # We need this to ensure the server can check", "CancelToken from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER from eth.db.chain import ChainDB from eth.db.backends.memory import", "def __len__(self): return len(self.connected_nodes) async def next_peer(self): return await self._new_peers.get() def get_server(privkey, address,", "peer pool is full for # incoming connections. monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8 =", "PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes = [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) # Give the", "True def __len__(self): return len(self.connected_nodes) async def next_peer(self): return await self._new_peers.get() def get_server(privkey,", "token) reader, writer = await initiator.connect() # Send auth init message to the", "ETHPeer) assert initiator_peer.sub_proto is not None assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version ==", "aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) # Perform p2p/sub-proto handshake, completing the full", "from eth.db.chain import ChainDB from eth.db.backends.memory import MemoryDB from p2p.auth import HandshakeInitiator, _handshake", "monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8 = False token = CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY,", "server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(),", "assert receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio async def 
test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers = []", "auth ack aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake( initiator, reader, writer, token)", "def mock_start_peer(peer): nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) # We need this to", "not None assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version assert receiver_peer.privkey ==", "is full for # incoming connections. monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()),", "SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async", "base_db = MemoryDB() headerdb = FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db)", "assert len(pool.connected_nodes) == 1 # Stop our peer to make sure its pending", "from p2p.kademlia import ( Node, Address, ) from trinity.protocol.eth.peer import ETHPeer from trinity.server", "_handshake from p2p.peer import ( PeerPool, ) from p2p.kademlia import ( Node, Address,", "egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) # Perform p2p/sub-proto handshake, completing the full handshake and", "from trinity.server import Server from tests.p2p.auth_constants import eip8_values from tests.trinity.core.dumb_peer import DumbPeer from", "receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio async def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers = [] async", "def test_server_incoming_connection(monkeypatch, server, 
event_loop): # We need this to ensure the server can", "await initiator_peer.do_sub_proto_handshake() # wait for peer to be processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert", "= HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token) reader, writer = await initiator.connect() # Send auth", "connections. monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes =", "HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token) reader, writer = await initiator.connect() # Send auth init", "receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto is not None assert initiator_peer.sub_proto.name", "import DumbPeer from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\",", "INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool:", "# wait for peer to be processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes) ==", "initiator_peer = ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID)", "initiator_peer.do_sub_proto_handshake() # wait for peer to be processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes)", "next_peer(self): return await self._new_peers.get() def get_server(privkey, address, peer_class): base_db = MemoryDB() headerdb =", "INITIATOR_PRIVKEY, use_eip8, token) reader, writer = 
await initiator.connect() # Send auth init message", "_handshake( initiator, reader, writer, token) initiator_peer = ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret,", "network_id=NETWORK_ID, peer_class=peer_class, ) return server @pytest.fixture async def server(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS,", "full for # incoming connections. monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID,", ") from p2p.kademlia import ( Node, Address, ) from trinity.protocol.eth.peer import ETHPeer from", "use_eip8 = False token = CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token) reader,", "then read and decode auth ack aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake(", "= ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) #", "self._new_peers.get() def get_server(privkey, address, peer_class): base_db = MemoryDB() headerdb = FakeAsyncHeaderDB(base_db) chaindb =", "CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token) reader, writer = await initiator.connect() #", "await self._new_peers.get() def get_server(privkey, address, peer_class): base_db = MemoryDB() headerdb = FakeAsyncHeaderDB(base_db) chaindb", "list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto is not None assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name", "incoming connections. 
monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8 = False token = CancelToken(\"<PASSWORD>\") initiator =", "chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db) server = Server( privkey, address.tcp_port, chain,", "receiver_server a chance to ack the handshake. await asyncio.sleep(0.1) assert len(started_peers) == 1", ") from trinity.protocol.eth.peer import ETHPeer from trinity.server import Server from tests.p2p.auth_constants import eip8_values", "get_server(privkey, address, peer_class): base_db = MemoryDB() headerdb = FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER)", "is not None assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version assert receiver_peer.privkey", "ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async def", "egress_mac, ingress_mac = await _handshake( initiator, reader, writer, token) initiator_peer = ETHPeer( remote=initiator.remote,", "need this to ensure the server can check if the peer pool is", "async def next_peer(self): return await self._new_peers.get() def get_server(privkey, address, peer_class): base_db = MemoryDB()", "event_loop, receiver_server_with_dumb_peer): started_peers = [] async def mock_start_peer(peer): nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer',", "# Give the receiver_server a chance to ack the handshake. await asyncio.sleep(0.1) assert", "Stop our peer to make sure its pending asyncio tasks are cancelled. 
await", "headerdb = FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db) server = Server(", "import FakeAsyncHeaderDB def get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1) port =", "initiator_peer.sub_proto.version == receiver_peer.sub_proto.version assert receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio async def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer):", "from eth_keys import keys from cancel_token import CancelToken from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER", "RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key", "nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) # We need this to ensure the", "the peer pool is full for # incoming connections. monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool", "Give the receiver_server a chance to ack the handshake. 
await asyncio.sleep(0.1) assert len(started_peers)", "= Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY,", ") return server @pytest.fixture async def server(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await", "eth.db.chain import ChainDB from eth.db.backends.memory import MemoryDB from p2p.auth import HandshakeInitiator, _handshake from", "= False connected_nodes = {} def __init__(self): self._new_peers = asyncio.Queue() async def start_peer(self,", "return await self._new_peers.get() def get_server(privkey, address, peer_class): base_db = MemoryDB() headerdb = FakeAsyncHeaderDB(base_db)", "Send auth init message to the server, then read and decode auth ack", "peer to be processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes) == 1 receiver_peer =", "pool. 
await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() # wait for peer to be processed await", "= CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token) reader, writer = await initiator.connect()", "1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool: is_full = False connected_nodes = {}", "async def mock_start_peer(peer): nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) # We need this", "= Address('127.0.0.1', get_open_port() + 1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool: is_full =", "RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = Address('127.0.0.1',", "server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async def receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS,", "INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool: is_full", "keys from cancel_token import CancelToken from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER from eth.db.chain import", "= asyncio.Queue() async def start_peer(self, peer): self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node):", "get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1) port = s.getsockname()[1] s.close() return", "get_open_port() + 1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool: is_full = False connected_nodes", "# incoming connections. 
monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple())", "writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) # Perform p2p/sub-proto handshake, completing the", "SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async", "== 1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto is not None", "= MemoryDB() headerdb = FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db) server", "this to ensure the server can check if the peer pool is full", "[] async def mock_start_peer(peer): nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) # We need", "await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch,", "mac_secret, egress_mac, ingress_mac = await _handshake( initiator, reader, writer, token) initiator_peer = ETHPeer(", "ack the handshake. 
await asyncio.sleep(0.1) assert len(started_peers) == 1 assert len(pool.connected_nodes) == 1", "== RECEIVER_PRIVKEY @pytest.mark.asyncio async def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers = [] async def", "trinity.protocol.eth.peer import ETHPeer from trinity.server import Server from tests.p2p.auth_constants import eip8_values from tests.trinity.core.dumb_peer", "@pytest.fixture async def receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield", "== receiver_peer.sub_proto.name assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version assert receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio async def", "<filename>tests/trinity/core/p2p-proto/test_server.py import asyncio import pytest import socket from eth_keys import keys from cancel_token", "the peer pool is full for # incoming connections. monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8", "from p2p.peer import ( PeerPool, ) from p2p.kademlia import ( Node, Address, )", "initiator, reader, writer, token) initiator_peer = ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret,", "False connected_nodes = {} def __init__(self): self._new_peers = asyncio.Queue() async def start_peer(self, peer):", "keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY,", "the full handshake and causing a new peer to be # added to", "can check if the peer pool is full for # incoming connections. 
monkeypatch.setattr(server,", "p2p.peer import ( PeerPool, ) from p2p.kademlia import ( Node, Address, ) from", "read and decode auth ack aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake( initiator,", "timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch, server, event_loop):", "MockPeerPool()) use_eip8 = False token = CancelToken(\"<PASSWORD>\") initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token)", "self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node): return True def __len__(self): return len(self.connected_nodes) async def next_peer(self):", "def next_peer(self): return await self._new_peers.get() def get_server(privkey, address, peer_class): base_db = MemoryDB() headerdb", "decode auth ack aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake( initiator, reader, writer,", "server(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await", "INITIATOR_PRIVKEY, tuple()) nodes = [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) # Give the receiver_server a chance", "pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes = [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) #", "yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch, server, event_loop): #", "the server, then read and decode auth ack aes_secret, mac_secret, egress_mac, ingress_mac =", "incoming connections. 
monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool()) pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes", "= Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port()", "from trinity.protocol.eth.peer import ETHPeer from trinity.server import Server from tests.p2p.auth_constants import eip8_values from", "initiator.connect() # Send auth init message to the server, then read and decode", "s.listen(1) port = s.getsockname()[1] s.close() return port port = get_open_port() NETWORK_ID = 99", "= get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1)", "SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE =", "server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async def receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer)", "import RopstenChain, ROPSTEN_GENESIS_HEADER from eth.db.chain import ChainDB from eth.db.backends.memory import MemoryDB from p2p.auth", "FakeAsyncHeaderDB def get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1) port = s.getsockname()[1]", "to be # added to the server's pool. 
await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() #", "= keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY", "= INITIATOR_PRIVKEY.public_key INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class", "RopstenChain, ROPSTEN_GENESIS_HEADER from eth.db.chain import ChainDB from eth.db.backends.memory import MemoryDB from p2p.auth import", "s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1) port = s.getsockname()[1] s.close() return port", "+ 1) INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS) class MockPeerPool: is_full = False connected_nodes =", "for # incoming connections. monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8 = False token = CancelToken(\"<PASSWORD>\")", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((\"\", 0)) s.listen(1) port = s.getsockname()[1] s.close() return port port =", "NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes = [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) # Give the receiver_server a", "server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(),", "network_id=NETWORK_ID) # Perform p2p/sub-proto handshake, completing the full handshake and causing a new", "from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER from eth.db.chain import ChainDB from eth.db.backends.memory import MemoryDB", "full for # incoming connections. 
monkeypatch.setattr(server, 'peer_pool', MockPeerPool()) use_eip8 = False token =", "receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await", "ROPSTEN_GENESIS_HEADER from eth.db.chain import ChainDB from eth.db.backends.memory import MemoryDB from p2p.auth import HandshakeInitiator,", "async def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers = [] async def mock_start_peer(peer): nonlocal started_peers", "node): return True def __len__(self): return len(self.connected_nodes) async def next_peer(self): return await self._new_peers.get()", "timeout=1) @pytest.fixture async def receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1)", "reader, writer = await initiator.connect() # Send auth init message to the server,", "ingress_mac = await _handshake( initiator, reader, writer, token) initiator_peer = ETHPeer( remote=initiator.remote, privkey=initiator.privkey,", "@pytest.mark.asyncio async def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers = [] async def mock_start_peer(peer): nonlocal", "eth.db.backends.memory import MemoryDB from p2p.auth import HandshakeInitiator, _handshake from p2p.peer import ( PeerPool,", "import ( Node, Address, ) from trinity.protocol.eth.peer import ETHPeer from trinity.server import Server", "from cancel_token import CancelToken from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER from eth.db.chain import ChainDB", "def start_peer(self, peer): self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self, node): return True def", "tuple()) nodes = [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) # Give the 
receiver_server a chance to", "peer to be # added to the server's pool. await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake()", "1 assert len(pool.connected_nodes) == 1 # Stop our peer to make sure its", "FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db) server = Server( privkey, address.tcp_port,", "tests.p2p.auth_constants import eip8_values from tests.trinity.core.dumb_peer import DumbPeer from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def get_open_port():", "get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture", "# added to the server's pool. await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() # wait for", "import eip8_values from tests.trinity.core.dumb_peer import DumbPeer from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def get_open_port(): s", "privkey, address.tcp_port, chain, chaindb, headerdb, base_db, network_id=NETWORK_ID, peer_class=peer_class, ) return server @pytest.fixture async", "MockPeerPool()) pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes = [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes)", "initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token) reader, writer = await initiator.connect() # Send", "address, peer_class): base_db = MemoryDB() headerdb = FakeAsyncHeaderDB(base_db) chaindb = ChainDB(base_db) chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain", "ensure the server can check if the peer pool is full for #", "added to the server's pool. 
await initiator_peer.do_p2p_handshake() await initiator_peer.do_sub_proto_handshake() # wait for peer", "chance to ack the handshake. await asyncio.sleep(0.1) assert len(started_peers) == 1 assert len(pool.connected_nodes)", "port port = get_open_port() NETWORK_ID = 99 SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY", "( Node, Address, ) from trinity.protocol.eth.peer import ETHPeer from trinity.server import Server from", "= [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) # Give the receiver_server a chance to ack the", "== receiver_peer.sub_proto.version assert receiver_peer.privkey == RECEIVER_PRIVKEY @pytest.mark.asyncio async def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers", "our peer to make sure its pending asyncio tasks are cancelled. await list(pool.connected_nodes.values())[0].cancel()", "INITIATOR_ADDRESS) class MockPeerPool: is_full = False connected_nodes = {} def __init__(self): self._new_peers =", "eth_keys import keys from cancel_token import CancelToken from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER from", "asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger() await asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.fixture async def receiver_server_with_dumb_peer(): server", "# Perform p2p/sub-proto handshake, completing the full handshake and causing a new peer", "from tests.trinity.core.dumb_peer import DumbPeer from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB def get_open_port(): s = socket.socket(socket.AF_INET,", "be processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes) == 1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert", "port = get_open_port() NETWORK_ID = 99 SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY =", 
"RECEIVER_PRIVKEY @pytest.mark.asyncio async def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer): started_peers = [] async def mock_start_peer(peer):", "reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) # Perform p2p/sub-proto handshake, completing", "keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS) INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key']) INITIATOR_PUBKEY =", "len(server.peer_pool.connected_nodes) == 1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto is not", "for peer to be processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes) == 1 receiver_peer", "port = s.getsockname()[1] s.close() return port port = get_open_port() NETWORK_ID = 99 SERVER_ADDRESS", "self._new_peers = asyncio.Queue() async def start_peer(self, peer): self.connected_nodes[peer.remote] = peer self._new_peers.put_nowait(peer) def is_valid_connection_candidate(self,", "ETHPeer( remote=initiator.remote, privkey=initiator.privkey, reader=reader, writer=writer, aes_secret=aes_secret, mac_secret=mac_secret, egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) # Perform", "handshake, completing the full handshake and causing a new peer to be #", "nodes = [RECEIVER_REMOTE] await pool.connect_to_nodes(nodes) # Give the receiver_server a chance to ack", "import socket from eth_keys import keys from cancel_token import CancelToken from eth.chains.ropsten import", "started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) # We need this to ensure the server can", 
"ingress_mac=ingress_mac, headerdb=server.headerdb, network_id=NETWORK_ID) # Perform p2p/sub-proto handshake, completing the full handshake and causing", "NETWORK_ID = 99 SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port) RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key']) RECEIVER_PUBKEY =", "def receiver_server_with_dumb_peer(): server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer) await asyncio.wait_for(server._start_tcp_listener(), timeout=1) yield server server.cancel_token.trigger()", "len(started_peers) == 1 assert len(pool.connected_nodes) == 1 # Stop our peer to make", "'peer_pool', MockPeerPool()) pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple()) nodes = [RECEIVER_REMOTE] await", "ack aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake( initiator, reader, writer, token) initiator_peer", "assert len(server.peer_pool.connected_nodes) == 1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer) assert initiator_peer.sub_proto is", "= [] async def mock_start_peer(peer): nonlocal started_peers started_peers.append(peer) monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer) # We", "asyncio.wait_for(server._close_tcp_listener(), timeout=1) @pytest.mark.asyncio async def test_server_incoming_connection(monkeypatch, server, event_loop): # We need this to", "chaindb.persist_header(ROPSTEN_GENESIS_HEADER) chain = RopstenChain(base_db) server = Server( privkey, address.tcp_port, chain, chaindb, headerdb, base_db,", "wait for peer to be processed await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes) == 1", "asyncio.wait_for(server.peer_pool.next_peer(), timeout=1) assert len(server.peer_pool.connected_nodes) == 1 receiver_peer = list(server.peer_pool.connected_nodes.values())[0] assert isinstance(receiver_peer, ETHPeer) 
assert" ]
[ "= 64 nor_size = 512 number = 0 total = 20 gene_all_list =", "img_part[y:y+h, x:x+w] = img_region number += 1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox = cv2.rectangle(img_part, (x,y),", "w_num, h_num) img_part = cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] = img_region number += 1 cv2.imwrite(outputdir+str(number)+'.jpg',", "'../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/'", "makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/'", "mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number if __name__ == \"__main__\": gene_size = 64 nor_size", "= np.concatenate(hstack, axis=1) vstack.append(image) img_region = np.concatenate(vstack) return img_region def inpainting(gene_all_list, nor_all_list, gene_size,", "parents, dirnames, filenames in os.walk(inputNordir): for f in filenames: nor_all_list.append(f) for i in", "gene_size = 64 nor_size = 512 number = 0 total = 20 gene_all_list", "range(w_num): img = cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image = np.concatenate(hstack, axis=1) vstack.append(image) img_region = np.concatenate(vstack)", "mask) return number if __name__ == \"__main__\": gene_size = 64 nor_size = 512", "h_num * gene_size gene_list = random.sample(gene_all_list, w_num*h_num) nor_list = random.sample(nor_all_list, 1) img_region =", "0 total = 20 gene_all_list = [] nor_all_list = [] for parents, dirnames,", "[] for i in range(h_num): hstack = [] for j in range(w_num): img", "nor_list = random.sample(nor_all_list, 1) img_region = make_region(gene_list, w_num, h_num) img_part = 
cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h,", "= make_region(gene_list, w_num, h_num) img_part = cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] = img_region number +=", "in filenames: nor_all_list.append(f) for i in range(total): number = inpainting(gene_all_list, nor_all_list, gene_size, nor_size,", "= h_num * gene_size gene_list = random.sample(gene_all_list, w_num*h_num) nor_list = random.sample(nor_all_list, 1) img_region", "= 0 total = 20 gene_all_list = [] nor_all_list = [] for parents,", "= np.zeros((nor_size,nor_size,1), np.uint8) mask_region = np.zeros((h,w,1), np.uint8) mask_region[:] = 255 mask[y:y+h, x:x+w] =", "nor_all_list = [] for parents, dirnames, filenames in os.walk(inputGenedir): for f in filenames:", "def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number): x = random.randint(0, nor_size-gene_size*2) y = random.randint(0,", "img_part.shape, bbox_label) mask = np.zeros((nor_size,nor_size,1), np.uint8) mask_region = np.zeros((h,w,1), np.uint8) mask_region[:] = 255", "__name__ == \"__main__\": gene_size = 64 nor_size = 512 number = 0 total", "dirnames, filenames in os.walk(inputNordir): for f in filenames: nor_all_list.append(f) for i in range(total):", "= random.randint(0, nor_size-gene_size*2) y = random.randint(0, nor_size-gene_size*2) w_num_max = (nor_size-x)/gene_size w_num = random.randint(1,", "'../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list, w_num, h_num): vstack = [] for", "make_region(gene_list, w_num, h_num): vstack = [] for i in range(h_num): hstack = []", "mask = np.zeros((nor_size,nor_size,1), np.uint8) mask_region = np.zeros((h,w,1), np.uint8) mask_region[:] = 255 mask[y:y+h, x:x+w]", "= '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) 
tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def", "= [] for i in range(h_num): hstack = [] for j in range(w_num):", "cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label) mask =", "np.zeros((h,w,1), np.uint8) mask_region[:] = 255 mask[y:y+h, x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number", "for f in filenames: nor_all_list.append(f) for i in range(total): number = inpainting(gene_all_list, nor_all_list,", "outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir)", "vstack = [] for i in range(h_num): hstack = [] for j in", "j in range(w_num): img = cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image = np.concatenate(hstack, axis=1) vstack.append(image) img_region", "(nor_size-x)/gene_size w_num = random.randint(1, w_num_max) w = w_num * gene_size h_num_max = (nor_size-y)/gene_size", "random.sample(nor_all_list, 1) img_region = make_region(gene_list, w_num, h_num) img_part = cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] =", "#!/usr/bin/env python # coding=utf-8 ''' 将生成patch聚合为癌症区域并嵌入到normal切块中 ''' import os import cv2 import random", "h = h_num * gene_size gene_list = random.sample(gene_all_list, w_num*h_num) nor_list = random.sample(nor_all_list, 1)", "= '../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list, w_num, h_num): vstack = []", "import tools import numpy as np import makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir =", "y = random.randint(0, nor_size-gene_size*2) w_num_max = 
(nor_size-x)/gene_size w_num = random.randint(1, w_num_max) w =", "random.sample(gene_all_list, w_num*h_num) nor_list = random.sample(nor_all_list, 1) img_region = make_region(gene_list, w_num, h_num) img_part =", "= 255 mask[y:y+h, x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number if __name__ ==", "= [] for parents, dirnames, filenames in os.walk(inputGenedir): for f in filenames: gene_all_list.append(f)", "in os.walk(inputNordir): for f in filenames: nor_all_list.append(f) for i in range(total): number =", "tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list, w_num, h_num): vstack = [] for i in", "makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label) mask = np.zeros((nor_size,nor_size,1), np.uint8) mask_region = np.zeros((h,w,1), np.uint8) mask_region[:]", "gene_size gene_list = random.sample(gene_all_list, w_num*h_num) nor_list = random.sample(nor_all_list, 1) img_region = make_region(gene_list, w_num,", "in range(w_num): img = cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image = np.concatenate(hstack, axis=1) vstack.append(image) img_region =", "img_region number += 1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8)", "filenames in os.walk(inputNordir): for f in filenames: nor_all_list.append(f) for i in range(total): number", "'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label) mask = np.zeros((nor_size,nor_size,1), np.uint8) mask_region = np.zeros((h,w,1), np.uint8)", "for parents, dirnames, filenames in os.walk(inputGenedir): for f in filenames: gene_all_list.append(f) for parents,", "* gene_size gene_list = random.sample(gene_all_list, w_num*h_num) nor_list = random.sample(nor_all_list, 1) img_region = make_region(gene_list,", "in filenames: gene_all_list.append(f) for parents, dirnames, filenames in os.walk(inputNordir): for f in 
filenames:", "mask[y:y+h, x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number if __name__ == \"__main__\": gene_size", "= img_region number += 1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0),", "将生成patch聚合为癌症区域并嵌入到normal切块中 ''' import os import cv2 import random import tools import numpy as", "= random.randint(1, h_num_max) h = h_num * gene_size gene_list = random.sample(gene_all_list, w_num*h_num) nor_list", "np import makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir", "<gh_stars>0 #!/usr/bin/env python # coding=utf-8 ''' 将生成patch聚合为癌症区域并嵌入到normal切块中 ''' import os import cv2 import", "= random.sample(gene_all_list, w_num*h_num) nor_list = random.sample(nor_all_list, 1) img_region = make_region(gene_list, w_num, h_num) img_part", "= '../../mydata/dcgan/virtual_dataset/' outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) tools.mkdir(outputbboxdir)", "image = np.concatenate(hstack, axis=1) vstack.append(image) img_region = np.concatenate(vstack) return img_region def inpainting(gene_all_list, nor_all_list,", "img_region = np.concatenate(vstack) return img_region def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number): x =", "nor_size-gene_size*2) w_num_max = (nor_size-x)/gene_size w_num = random.randint(1, w_num_max) w = w_num * gene_size", "'../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list,", "+= 1 cv2.imwrite(outputdir+str(number)+'.jpg', 
img_part) img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox)", "w_num*h_num) nor_list = random.sample(nor_all_list, 1) img_region = make_region(gene_list, w_num, h_num) img_part = cv2.imread(inputNordir+nor_list[0])", "512 number = 0 total = 20 gene_all_list = [] nor_all_list = []", "inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number): x = random.randint(0, nor_size-gene_size*2) y = random.randint(0, nor_size-gene_size*2)", "255 mask[y:y+h, x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number if __name__ == \"__main__\":", "cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label =", "gene_size h_num_max = (nor_size-y)/gene_size h_num = random.randint(1, h_num_max) h = h_num * gene_size", "np.uint8) mask_region[:] = 255 mask[y:y+h, x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number if", "range(h_num): hstack = [] for j in range(w_num): img = cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image", "for f in filenames: gene_all_list.append(f) for parents, dirnames, filenames in os.walk(inputNordir): for f", "# coding=utf-8 ''' 将生成patch聚合为癌症区域并嵌入到normal切块中 ''' import os import cv2 import random import tools", "= random.randint(1, w_num_max) w = w_num * gene_size h_num_max = (nor_size-y)/gene_size h_num =", "mask_region[:] = 255 mask[y:y+h, x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number if __name__", "64 nor_size = 512 number = 0 total = 20 gene_all_list = []", "= [] nor_all_list = [] for parents, dirnames, filenames in os.walk(inputGenedir): for f", "h_num): vstack = [] for i in range(h_num): hstack = [] for j", "w_num = random.randint(1, w_num_max) w = w_num * gene_size h_num_max = (nor_size-y)/gene_size 
h_num", "inputNordir = '../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir", "= mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number if __name__ == \"__main__\": gene_size = 64", "numpy as np import makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/' outputdir =", "outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list, w_num, h_num): vstack =", "for i in range(h_num): hstack = [] for j in range(w_num): img =", "= random.sample(nor_all_list, 1) img_region = make_region(gene_list, w_num, h_num) img_part = cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w]", "\"__main__\": gene_size = 64 nor_size = 512 number = 0 total = 20", "number if __name__ == \"__main__\": gene_size = 64 nor_size = 512 number =", "np.concatenate(vstack) return img_region def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number): x = random.randint(0, nor_size-gene_size*2)", "in range(h_num): hstack = [] for j in range(w_num): img = cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img)", "''' 将生成patch聚合为癌症区域并嵌入到normal切块中 ''' import os import cv2 import random import tools import numpy", "import os import cv2 import random import tools import numpy as np import", "= (nor_size-y)/gene_size h_num = random.randint(1, h_num_max) h = h_num * gene_size gene_list =", "mask_region = np.zeros((h,w,1), np.uint8) mask_region[:] = 255 mask[y:y+h, x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask)", "cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number if __name__ == \"__main__\": gene_size = 64 nor_size =", "for j in range(w_num): img = 
cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image = np.concatenate(hstack, axis=1) vstack.append(image)", "os.walk(inputGenedir): for f in filenames: gene_all_list.append(f) for parents, dirnames, filenames in os.walk(inputNordir): for", "= random.randint(0, nor_size-gene_size*2) w_num_max = (nor_size-x)/gene_size w_num = random.randint(1, w_num_max) w = w_num", "(x,y), (x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number,", "bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label) mask = np.zeros((nor_size,nor_size,1), np.uint8)", "nor_size, number): x = random.randint(0, nor_size-gene_size*2) y = random.randint(0, nor_size-gene_size*2) w_num_max = (nor_size-x)/gene_size", "import cv2 import random import tools import numpy as np import makevocxml inputGenedir", "img_region def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number): x = random.randint(0, nor_size-gene_size*2) y =", "nor_size-gene_size*2) y = random.randint(0, nor_size-gene_size*2) w_num_max = (nor_size-x)/gene_size w_num = random.randint(1, w_num_max) w", "[] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label) mask = np.zeros((nor_size,nor_size,1), np.uint8) mask_region =", "= cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] = img_region number += 1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox =", "img = cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image = np.concatenate(hstack, axis=1) vstack.append(image) img_region = np.concatenate(vstack) return", "'../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir = 
'../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/'", "total = 20 gene_all_list = [] nor_all_list = [] for parents, dirnames, filenames", "return img_region def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number): x = random.randint(0, nor_size-gene_size*2) y", "np.zeros((nor_size,nor_size,1), np.uint8) mask_region = np.zeros((h,w,1), np.uint8) mask_region[:] = 255 mask[y:y+h, x:x+w] = mask_region", "img_part = cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] = img_region number += 1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox", "dirnames, filenames in os.walk(inputGenedir): for f in filenames: gene_all_list.append(f) for parents, dirnames, filenames", "w_num_max = (nor_size-x)/gene_size w_num = random.randint(1, w_num_max) w = w_num * gene_size h_num_max", "= cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image = np.concatenate(hstack, axis=1) vstack.append(image) img_region = np.concatenate(vstack) return img_region", "= '../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir =", "* gene_size h_num_max = (nor_size-y)/gene_size h_num = random.randint(1, h_num_max) h = h_num *", "cv2 import random import tools import numpy as np import makevocxml inputGenedir =", "(nor_size-y)/gene_size h_num = random.randint(1, h_num_max) h = h_num * gene_size gene_list = random.sample(gene_all_list,", "(0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label)", "def make_region(gene_list, w_num, h_num): vstack = [] for i in range(h_num): hstack =", "in os.walk(inputGenedir): for f in filenames: gene_all_list.append(f) for parents, dirnames, filenames in 
os.walk(inputNordir):", "f in filenames: nor_all_list.append(f) for i in range(total): number = inpainting(gene_all_list, nor_all_list, gene_size,", "tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list, w_num, h_num): vstack = [] for i", "random.randint(1, h_num_max) h = h_num * gene_size gene_list = random.sample(gene_all_list, w_num*h_num) nor_list =", "i in range(h_num): hstack = [] for j in range(w_num): img = cv2.imread(inputGenedir+gene_list[i*w_num+j])", "make_region(gene_list, w_num, h_num) img_part = cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] = img_region number += 1", "= np.concatenate(vstack) return img_region def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number): x = random.randint(0,", "1) img_region = make_region(gene_list, w_num, h_num) img_part = cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] = img_region", "img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1,", "= w_num * gene_size h_num_max = (nor_size-y)/gene_size h_num = random.randint(1, h_num_max) h =", "np.uint8) mask_region = np.zeros((h,w,1), np.uint8) mask_region[:] = 255 mask[y:y+h, x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg',", "'../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list, w_num, h_num): vstack", "[] for j in range(w_num): img = cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image = np.concatenate(hstack, axis=1)", "= 20 gene_all_list = [] nor_all_list = [] for parents, dirnames, filenames in", "outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/' 
tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list, w_num,", "= [] for j in range(w_num): img = cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image = np.concatenate(hstack,", "random.randint(1, w_num_max) w = w_num * gene_size h_num_max = (nor_size-y)/gene_size h_num = random.randint(1,", "1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label", "hstack.append(img) image = np.concatenate(hstack, axis=1) vstack.append(image) img_region = np.concatenate(vstack) return img_region def inpainting(gene_all_list,", "random.randint(0, nor_size-gene_size*2) w_num_max = (nor_size-x)/gene_size w_num = random.randint(1, w_num_max) w = w_num *", "w_num, h_num): vstack = [] for i in range(h_num): hstack = [] for", "axis=1) vstack.append(image) img_region = np.concatenate(vstack) return img_region def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number):", "as np import makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/'", "number): x = random.randint(0, nor_size-gene_size*2) y = random.randint(0, nor_size-gene_size*2) w_num_max = (nor_size-x)/gene_size w_num", "filenames: gene_all_list.append(f) for parents, dirnames, filenames in os.walk(inputNordir): for f in filenames: nor_all_list.append(f)", "w_num_max) w = w_num * gene_size h_num_max = (nor_size-y)/gene_size h_num = random.randint(1, h_num_max)", "bbox_label) mask = np.zeros((nor_size,nor_size,1), np.uint8) mask_region = np.zeros((h,w,1), np.uint8) mask_region[:] = 255 mask[y:y+h,", "hstack = [] for j in range(w_num): img = cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image =", "== \"__main__\": gene_size = 64 nor_size = 512 number = 0 total 
=", "(x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape,", "nor_size = 512 number = 0 total = 20 gene_all_list = [] nor_all_list", "gene_list = random.sample(gene_all_list, w_num*h_num) nor_list = random.sample(nor_all_list, 1) img_region = make_region(gene_list, w_num, h_num)", "bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label) mask = np.zeros((nor_size,nor_size,1), np.uint8) mask_region = np.zeros((h,w,1),", "import random import tools import numpy as np import makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/'", "gene_size, nor_size, number): x = random.randint(0, nor_size-gene_size*2) y = random.randint(0, nor_size-gene_size*2) w_num_max =", "python # coding=utf-8 ''' 将生成patch聚合为癌症区域并嵌入到normal切块中 ''' import os import cv2 import random import", "tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list, w_num, h_num): vstack = [] for i in range(h_num):", "return number if __name__ == \"__main__\": gene_size = 64 nor_size = 512 number", "= np.zeros((h,w,1), np.uint8) mask_region[:] = 255 mask[y:y+h, x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return", "random import tools import numpy as np import makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir", "img_part_bbox) bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label) mask = np.zeros((nor_size,nor_size,1),", "= (nor_size-x)/gene_size w_num = random.randint(1, w_num_max) w = w_num * gene_size h_num_max =", "if __name__ == \"__main__\": gene_size = 64 nor_size = 512 number = 0", "gene_all_list = [] nor_all_list = [] for parents, dirnames, filenames in os.walk(inputGenedir): for", "tools.mkdir(outputmaskdir) def make_region(gene_list, 
w_num, h_num): vstack = [] for i in range(h_num): hstack", "number, img_part.shape, bbox_label) mask = np.zeros((nor_size,nor_size,1), np.uint8) mask_region = np.zeros((h,w,1), np.uint8) mask_region[:] =", "cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] = img_region number += 1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox = cv2.rectangle(img_part,", "inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir", "coding=utf-8 ''' 将生成patch聚合为癌症区域并嵌入到normal切块中 ''' import os import cv2 import random import tools import", "w = w_num * gene_size h_num_max = (nor_size-y)/gene_size h_num = random.randint(1, h_num_max) h", "cv2.imread(inputGenedir+gene_list[i*w_num+j]) hstack.append(img) image = np.concatenate(hstack, axis=1) vstack.append(image) img_region = np.concatenate(vstack) return img_region def", "= cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor'])", "= [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label) mask = np.zeros((nor_size,nor_size,1), np.uint8) mask_region", "''' import os import cv2 import random import tools import numpy as np", "np.concatenate(hstack, axis=1) vstack.append(image) img_region = np.concatenate(vstack) return img_region def inpainting(gene_all_list, nor_all_list, gene_size, nor_size,", "outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir)", "h_num) img_part = cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] = img_region 
number += 1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part)", "20 gene_all_list = [] nor_all_list = [] for parents, dirnames, filenames in os.walk(inputGenedir):", "filenames: nor_all_list.append(f) for i in range(total): number = inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number)", "import makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir =", "img_part) img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label = []", "x:x+w] = mask_region cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask) return number if __name__ == \"__main__\": gene_size =", "w_num * gene_size h_num_max = (nor_size-y)/gene_size h_num = random.randint(1, h_num_max) h = h_num", "x:x+w] = img_region number += 1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h),", "= 512 number = 0 total = 20 gene_all_list = [] nor_all_list =", "number += 1 cv2.imwrite(outputdir+str(number)+'.jpg', img_part) img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg',", "h_num_max = (nor_size-y)/gene_size h_num = random.randint(1, h_num_max) h = h_num * gene_size gene_list", "os import cv2 import random import tools import numpy as np import makevocxml", "x = random.randint(0, nor_size-gene_size*2) y = random.randint(0, nor_size-gene_size*2) w_num_max = (nor_size-x)/gene_size w_num =", "[] for parents, dirnames, filenames in os.walk(inputGenedir): for f in filenames: gene_all_list.append(f) for", "= '../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/' outputdir = '../../mydata/dcgan/virtual_dataset/' outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir =", "f in filenames: gene_all_list.append(f) for parents, dirnames, 
filenames in os.walk(inputNordir): for f in", "random.randint(0, nor_size-gene_size*2) y = random.randint(0, nor_size-gene_size*2) w_num_max = (nor_size-x)/gene_size w_num = random.randint(1, w_num_max)", "tools import numpy as np import makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/'", "for parents, dirnames, filenames in os.walk(inputNordir): for f in filenames: nor_all_list.append(f) for i", "h_num_max) h = h_num * gene_size gene_list = random.sample(gene_all_list, w_num*h_num) nor_list = random.sample(nor_all_list,", "parents, dirnames, filenames in os.walk(inputGenedir): for f in filenames: gene_all_list.append(f) for parents, dirnames,", "import numpy as np import makevocxml inputGenedir = '../../mydata/dcgan/dcgan_micro_512/' inputNordir = '../../mydata/dcgan/normal_part/' outputdir", "os.walk(inputNordir): for f in filenames: nor_all_list.append(f) for i in range(total): number = inpainting(gene_all_list,", "nor_all_list, gene_size, nor_size, number): x = random.randint(0, nor_size-gene_size*2) y = random.randint(0, nor_size-gene_size*2) w_num_max", "cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir,", "vstack.append(image) img_region = np.concatenate(vstack) return img_region def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number): x", "[] nor_all_list = [] for parents, dirnames, filenames in os.walk(inputGenedir): for f in", "8) cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox) bbox_label = [] bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor']) makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label) mask", "filenames in os.walk(inputGenedir): for f in filenames: gene_all_list.append(f) for parents, dirnames, filenames in", "= '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = 
'../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir) tools.mkdir(outputmaskdir) def make_region(gene_list, w_num, h_num):", "img_region = make_region(gene_list, w_num, h_num) img_part = cv2.imread(inputNordir+nor_list[0]) img_part[y:y+h, x:x+w] = img_region number", "'../../mydata/dcgan/virtual_dataset/' outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/' outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/' outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/' tools.mkdir(outputdir) tools.mkdir(outputbboxdir) tools.mkdir(outputxmldir)", "gene_all_list.append(f) for parents, dirnames, filenames in os.walk(inputNordir): for f in filenames: nor_all_list.append(f) for", "number = 0 total = 20 gene_all_list = [] nor_all_list = [] for", "h_num = random.randint(1, h_num_max) h = h_num * gene_size gene_list = random.sample(gene_all_list, w_num*h_num)" ]
[ "hot\" : True, \"show warning\" : True } dataset = TianChiGuangdongDefect(config) indices =", "224, 3], \"mil\" : False, \"use cache\" : True, \"one hot\" : True,", "Violence from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if __name__ == '__main__': config = { \"output", "ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if", "\"use cache\" : True, \"one hot\" : True, \"show warning\" : True }", "in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) dataset.time1 = 0.0", "print(\"\") print(\"\") for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') #", "dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) dataset.time1 = 0.0 dataset.count = 0 print(\"\") print(\"\")", "# print(label) # if img_bag is not None: # plt.figure(0) # plt.clf() #", "dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if __name__ == '__main__': config = { \"output shape\" :", "for i in range(row): # for j in range(col): # plt.subplot(row, col, i", "3], } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') # for ind in indices:", "sys sys.path.append('.') sys.path.append('../') import numpy as np import cv2 import tensorflow as tf", "= 0.0 dataset.count = 0 print(\"\") print(\"\") print(\"round 2\") print(\"\") print(\"\") for ind", "\"mil\" : False, \"use cache\" : True, \"one hot\" : True, \"show warning\"", "# if img_bag is not None: # plt.figure(0) # plt.clf() # row =", ": True } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') print(len(indices)) img_list = []", "\"output shape\" : [224, 224, 3], \"mil\" : False, \"use cache\" : True,", "if __name__ == '__main__': config = { \"output shape\" : [224, 224, 3],", "# plt.figure(0) # plt.clf() # plt.imshow(img) # plt.pause(1) 
config = { \"output shape\"", "dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img_bag is not None: # plt.figure(0)", "0.0 dataset.count = 0 print(\"\") print(\"\") print(\"round 2\") print(\"\") print(\"\") for ind in", "for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) #", "cv2 import tensorflow as tf import matplotlib.pyplot as plt from dataset.violence import Violence", "print(label) # if img_bag is not None: # plt.figure(0) # plt.clf() # row", "not None: # plt.figure(0) # plt.clf() # plt.imshow(img) # plt.pause(1) config = {", "True, \"one hot\" : True, \"show warning\" : True } dataset = TianChiGuangdongDefect(config)", "indices = dataset.get_image_indices('trainval') # for ind in indices: # img_bag, label = dataset.read_image_by_index(ind,", "# plt.clf() # row = 4 # col = int(len(img_bag) / row) #", "if img_bag is not None: # plt.figure(0) # plt.clf() # row = 4", "range(row): # for j in range(col): # plt.subplot(row, col, i * col+j+1) #", "print(\"round 2\") print(\"\") print(\"\") for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval',", "# for j in range(col): # plt.subplot(row, col, i * col+j+1) # plt.imshow(img_bag[i*col+j])", "indices: # img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img_bag", "plt.figure(0) # plt.clf() # plt.imshow(img) # plt.pause(1) config = { \"output shape\" :", "dataset.get_image_indices('trainval') # for ind in indices: # img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised')", "as tf import matplotlib.pyplot as plt from dataset.violence import Violence from dataset.tianchi_guangdong_defect import", "img_bag is not None: # plt.figure(0) # plt.clf() # row = 4 #", "[224, 224, 3], } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') # for ind", "4 # col = 
int(len(img_bag) / row) # print(len(img_bag), row, col) # for", "= int(len(img_bag) / row) # print(len(img_bag), row, col) # for i in range(row):", "import os import sys sys.path.append('.') sys.path.append('../') import numpy as np import cv2 import", "plt from dataset.violence import Violence from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if __name__ == '__main__':", "indices = dataset.get_image_indices('trainval') print(len(indices)) img_list = [] for ind in indices: img, label", "dataset.get_image_indices('trainval') print(len(indices)) img_list = [] for ind in indices: img, label = dataset.read_image_by_index(ind,", "import cv2 import tensorflow as tf import matplotlib.pyplot as plt from dataset.violence import", "import tensorflow as tf import matplotlib.pyplot as plt from dataset.violence import Violence from", "print(len(indices)) img_list = [] for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval',", ": [224, 224, 3], } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') # for", "row, col) # for i in range(row): # for j in range(col): #", "None: # plt.figure(0) # plt.clf() # plt.imshow(img) # plt.pause(1) config = { \"output", "from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if __name__ == '__main__': config = { \"output shape\"", "= dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) dataset.time1 = 0.0 dataset.count = 0 print(\"\")", "import numpy as np import cv2 import tensorflow as tf import matplotlib.pyplot as", "plt.pause(1) config = { \"output shape\" : [224, 224, 3], } dataset =", "numpy as np import cv2 import tensorflow as tf import matplotlib.pyplot as plt", "2\") print(\"\") print(\"\") for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised')", "int(len(img_bag) / row) # print(len(img_bag), row, col) # for i in range(row): #", "tensorflow as tf import 
matplotlib.pyplot as plt from dataset.violence import Violence from dataset.tianchi_guangdong_defect", "tf import matplotlib.pyplot as plt from dataset.violence import Violence from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect", "col) # for i in range(row): # for j in range(col): # plt.subplot(row,", "plt.figure(0) # plt.clf() # row = 4 # col = int(len(img_bag) / row)", "3], \"mil\" : False, \"use cache\" : True, \"one hot\" : True, \"show", "plt.clf() # row = 4 # col = int(len(img_bag) / row) # print(len(img_bag),", "is not None: # plt.figure(0) # plt.clf() # row = 4 # col", "# img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img_bag is", "# print(label) # if img is not None: # plt.figure(0) # plt.clf() #", "{ \"output shape\" : [224, 224, 3], \"mil\" : False, \"use cache\" :", "= 4 # col = int(len(img_bag) / row) # print(len(img_bag), row, col) #", "# for ind in indices: # img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') #", "# plt.figure(0) # plt.clf() # row = 4 # col = int(len(img_bag) /", "is not None: # plt.figure(0) # plt.clf() # plt.imshow(img) # plt.pause(1) config =", "# row = 4 # col = int(len(img_bag) / row) # print(len(img_bag), row,", "= { \"output shape\" : [224, 224, 3], } dataset = TianChiGuangdongDefect(config) indices", "as np import cv2 import tensorflow as tf import matplotlib.pyplot as plt from", "= [] for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') #", "if img is not None: # plt.figure(0) # plt.clf() # plt.imshow(img) # plt.pause(1)", "224, 3], } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') # for ind in", "as plt from dataset.violence import Violence from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if __name__ ==", "shape\" : [224, 224, 3], } dataset = TianChiGuangdongDefect(config) indices = 
dataset.get_image_indices('trainval') #", "label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img is not None:", "i in range(row): # for j in range(col): # plt.subplot(row, col, i *", "img is not None: # plt.figure(0) # plt.clf() # plt.imshow(img) # plt.pause(1) config", "} dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') # for ind in indices: #", "TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') print(len(indices)) img_list = [] for ind in indices: img,", "shape\" : [224, 224, 3], \"mil\" : False, \"use cache\" : True, \"one", "in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img", "dataset.violence import Violence from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if __name__ == '__main__': config =", "for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) dataset.time1", "print(\"\") for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label)", "== '__main__': config = { \"output shape\" : [224, 224, 3], \"mil\" :", "img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img is not", "} dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') print(len(indices)) img_list = [] for ind", "method='supervised') # print(label) # if img is not None: # plt.figure(0) # plt.clf()", "[] for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label)", "img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img_bag is not", "in indices: # img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if", "import sys 
sys.path.append('.') sys.path.append('../') import numpy as np import cv2 import tensorflow as", ": True, \"one hot\" : True, \"show warning\" : True } dataset =", ": True, \"show warning\" : True } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval')", ": False, \"use cache\" : True, \"one hot\" : True, \"show warning\" :", "dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img is not None: # plt.figure(0)", "# plt.pause(1) config = { \"output shape\" : [224, 224, 3], } dataset", "col = int(len(img_bag) / row) # print(len(img_bag), row, col) # for i in", "phase='trainval', method='supervised') # print(label) # if img_bag is not None: # plt.figure(0) #", "/ row) # print(len(img_bag), row, col) # for i in range(row): # for", "= TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') # for ind in indices: # img_bag, label", "# plt.imshow(img) # plt.pause(1) config = { \"output shape\" : [224, 224, 3],", "plt.imshow(img) # plt.pause(1) config = { \"output shape\" : [224, 224, 3], }", "# if img is not None: # plt.figure(0) # plt.clf() # plt.imshow(img) #", "in range(row): # for j in range(col): # plt.subplot(row, col, i * col+j+1)", "cache\" : True, \"one hot\" : True, \"show warning\" : True } dataset", "indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img is", "from dataset.violence import Violence from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if __name__ == '__main__': config", "config = { \"output shape\" : [224, 224, 3], \"mil\" : False, \"use", "row) # print(len(img_bag), row, col) # for i in range(row): # for j", "print(len(img_bag), row, col) # for i in range(row): # for j in range(col):", "indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) dataset.time1 = 0.0 dataset.count", "= dataset.read_image_by_index(ind, 
phase='trainval', method='supervised') # print(label) # if img_bag is not None: #", "# for i in range(row): # for j in range(col): # plt.subplot(row, col,", "label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img_bag is not None:", "import Violence from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if __name__ == '__main__': config = {", "os import sys sys.path.append('.') sys.path.append('../') import numpy as np import cv2 import tensorflow", "dataset.time1 = 0.0 dataset.count = 0 print(\"\") print(\"\") print(\"round 2\") print(\"\") print(\"\") for", "0 print(\"\") print(\"\") print(\"round 2\") print(\"\") print(\"\") for ind in indices: img, label", "= dataset.get_image_indices('trainval') # for ind in indices: # img_bag, label = dataset.read_image_by_index(ind, phase='trainval',", "False, \"use cache\" : True, \"one hot\" : True, \"show warning\" : True", "dataset.count = 0 print(\"\") print(\"\") print(\"round 2\") print(\"\") print(\"\") for ind in indices:", "sys.path.append('.') sys.path.append('../') import numpy as np import cv2 import tensorflow as tf import", "= 0 print(\"\") print(\"\") print(\"round 2\") print(\"\") print(\"\") for ind in indices: img,", "method='supervised') # print(label) dataset.time1 = 0.0 dataset.count = 0 print(\"\") print(\"\") print(\"round 2\")", "= dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) # if img is not None: #", "plt.clf() # plt.imshow(img) # plt.pause(1) config = { \"output shape\" : [224, 224,", "{ \"output shape\" : [224, 224, 3], } dataset = TianChiGuangdongDefect(config) indices =", "print(label) # if img is not None: # plt.figure(0) # plt.clf() # plt.imshow(img)", "phase='trainval', method='supervised') # print(label) # if img is not None: # plt.figure(0) #", "for ind in indices: # img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label)", "not None: # 
plt.figure(0) # plt.clf() # row = 4 # col =", "j in range(col): # plt.subplot(row, col, i * col+j+1) # plt.imshow(img_bag[i*col+j]) # plt.pause(3)", "True } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') print(len(indices)) img_list = [] for", "sys.path.append('../') import numpy as np import cv2 import tensorflow as tf import matplotlib.pyplot", "'__main__': config = { \"output shape\" : [224, 224, 3], \"mil\" : False,", "[224, 224, 3], \"mil\" : False, \"use cache\" : True, \"one hot\" :", "# plt.clf() # plt.imshow(img) # plt.pause(1) config = { \"output shape\" : [224,", "ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) dataset.time1 =", "for j in range(col): # plt.subplot(row, col, i * col+j+1) # plt.imshow(img_bag[i*col+j]) #", "warning\" : True } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') print(len(indices)) img_list =", "# col = int(len(img_bag) / row) # print(len(img_bag), row, col) # for i", "matplotlib.pyplot as plt from dataset.violence import Violence from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if __name__", "# print(len(img_bag), row, col) # for i in range(row): # for j in", "= { \"output shape\" : [224, 224, 3], \"mil\" : False, \"use cache\"", "\"output shape\" : [224, 224, 3], } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval')", "import matplotlib.pyplot as plt from dataset.violence import Violence from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect if", "label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) dataset.time1 = 0.0 dataset.count = 0", "dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') print(len(indices)) img_list = [] for ind in", "print(\"\") print(\"round 2\") print(\"\") print(\"\") for ind in indices: img, label = 
dataset.read_image_by_index(ind,", "TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') # for ind in indices: # img_bag, label =", "import TianChiGuangdongDefect if __name__ == '__main__': config = { \"output shape\" : [224,", "None: # plt.figure(0) # plt.clf() # row = 4 # col = int(len(img_bag)", "phase='trainval', method='supervised') # print(label) dataset.time1 = 0.0 dataset.count = 0 print(\"\") print(\"\") print(\"round", "np import cv2 import tensorflow as tf import matplotlib.pyplot as plt from dataset.violence", "print(\"\") print(\"\") print(\"round 2\") print(\"\") print(\"\") for ind in indices: img, label =", "TianChiGuangdongDefect if __name__ == '__main__': config = { \"output shape\" : [224, 224,", "print(label) dataset.time1 = 0.0 dataset.count = 0 print(\"\") print(\"\") print(\"round 2\") print(\"\") print(\"\")", "config = { \"output shape\" : [224, 224, 3], } dataset = TianChiGuangdongDefect(config)", "= dataset.get_image_indices('trainval') print(len(indices)) img_list = [] for ind in indices: img, label =", "row = 4 # col = int(len(img_bag) / row) # print(len(img_bag), row, col)", "ind in indices: # img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) #", "__name__ == '__main__': config = { \"output shape\" : [224, 224, 3], \"mil\"", ": [224, 224, 3], \"mil\" : False, \"use cache\" : True, \"one hot\"", "method='supervised') # print(label) # if img_bag is not None: # plt.figure(0) # plt.clf()", "img_list = [] for ind in indices: img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised')", "# print(label) dataset.time1 = 0.0 dataset.count = 0 print(\"\") print(\"\") print(\"round 2\") print(\"\")", "True, \"show warning\" : True } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') print(len(indices))", "\"one hot\" : True, \"show warning\" : True } dataset = TianChiGuangdongDefect(config) indices", "img, 
label = dataset.read_image_by_index(ind, phase='trainval', method='supervised') # print(label) dataset.time1 = 0.0 dataset.count =", "\"show warning\" : True } dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') print(len(indices)) img_list", "dataset = TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') # for ind in indices: # img_bag,", "= TianChiGuangdongDefect(config) indices = dataset.get_image_indices('trainval') print(len(indices)) img_list = [] for ind in indices:" ]
[ "len(message) == 0 or message[-1] != \"\\n\": message += \"\\n\" t = timestamp()", "series of symbilic link complying to the # naming scheme imposed by the", "to \"2222\". Or, alternatively from the command line by the commaand \"hsserver_lagacy\". The", "self.state() != \"idle\": sleep(0.05) # The \"start_series_triggered\" command does not allow a list", "changed. If the backgroud image does not have the the same number of", "= LCLS mode frame_trigger_type = 2 if self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width))", "Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value) while \"busy\" in self.state(): sleep(0.05)", "a new background image, no reply get_state - reply is integer number containing", "not locally. If 'save_raw' is true (default: false), the image raw data is", "0) def is_reading (self): \"tells whether the chip is currently being read out\"", "\"read\" # is either \"queued\" or \"executing\" if (status & 0x00000300) != 0:", "a fresh the backgound image, which is substracted from every image after readout", "@property def connected(self): from tcp_client import connected return connected(self.ip_address) online = connected def", "to the format used on the MAR CCD compter. e.g. \"//id14bxf/data\" in Windows", "detector out without correcting and displaying the image.\" self.write(\"readout,3\") self.last_read = time() def", "saved as a file. The image file is written in background as a", "MAR CCD compter. e.g. 
\"//id14bxf/data\" in Windows maps to \"/net/id14bxf/data\" on Unix\"\"\" if", "error(\"Relative path of %r with respect to %r: %s\" % (filenames[i],tempdir,msg)) pathname =", "= time() def readout_raw(self): \"Reads the detector out without correcting and displaying the", "value) while \"busy\" in self.state(): sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try: return", "gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile) def timestamp(): \"\"\"Current date and time as formatted ASCCI", "4=error The exception is the 'state' field, which has only 0=idle and 8=busy.", "one detector\"\"\" if name is not None: self.name = name self.timeout = 1.0", "the third parameter (\"Server Arguments\" or \"Personal Name\") set to \"2222\". Or, alternatively", "= 7680/bin_factor # MS340HS headersize = 4096 image_nbytes = 2*image_size**2 filesize = headersize+image_nbytes", "name. from os.path import dirname,relpath,islink,exists from os import symlink,remove from shutil import rmtree", "= \"4.0.1\" # default name \"rayonix_detector\" may be overridden in subclass from logging", "= self.state_code() except: return True # bit mask 0x00444440 masks out error flags", "0x00003000) != 0) def state(self): \"\"\"Status information as string: idle,integating,reading,writing\"\"\" try: status =", "(status & 0x00010000) != 0: t+= [\"write queued\"] if (status & 0x00020000) !=", "a 4-bit code, with the following meaning: 0=idle, 1=queued, 2=executing, 4=error The exception", "!= 0: t+= [\"write queued\"] if (status & 0x00020000) != 0: t+= [\"writing\"]", "not self.is_integrating() and self.connected: sleep (0.05) def abort(self): \"\"\"Cancel series acquiation mode\"\"\" self.write(\"abort\")", "4-bit fields bits 0-3: state: 0=idle,8=busy bits 4-7: acquire bits 8-11: read bits", "with # the specified name. 
from os.path import dirname,relpath,islink,exists from os import symlink,remove", "0: t+= [\"write queued\"] if (status & 0x00020000) != 0: t+= [\"writing\"] if", "image is saved as a file. The image file is written in background", "readout(self,filename=None): \"\"\"Reads the detector. If a filename is given, the image is saved", "not triggered, 1= triggered frame transfer, 2 = bulb mode, 3 = LCLS", "while self.state() != \"idle\": sleep(0.05) # The \"start_series_triggered\" command does not allow a", "is started from the MarCCD software from the Remote Control control panel with", "list of filenames # to be specified, but uses auto-generated filenames instead. #", "numpy import nan # Readout rate in frames per second as function of", "after the server has been restarted or after the bin factor has been", "online = connected def write(self,command): \"\"\"Sends a comman that does not generate a", "filenames instead. # As a work-araound generated a series of symbilic link complying", "\"idle\": sleep(0.05) # Need a valid background image before starting acquisition. if self.auto_bkg:", "software does not save to first image, which is a bad # image,", "number of digits for the filename sequence number, e.g. 6 for 'test000001.rx'\"\"\" #", "writefile,<filename>,1 - Save the last read image, no reply set_bin,8,8 - Use 512x512-pixel", "image. 
\"\"\" if filename != None: self.make_directory(filename) if not self.save_raw: if filename !=", "server from the local format to the format used on the MAR CCD", "# MS340HS headersize = 4096 image_nbytes = 2*image_size**2 filesize = headersize+image_nbytes return filesize", "rising edge starts acquisition, # falling edge initiates frame transfer/readout self.bulb_mode = 0", "is either \"queued\" or \"executing\" return ((self.state_code() & 0x00003000) != 0) def state(self):", "2: 10, 3: 15, 4: 25, 5: 40, 6: 60, 8: 75, 10:", "4-7: acquire bits 8-11: read bits 12-15: correct bits 16-19: write bits 20-23:", "number of pixels of current the background image, e.g. \"2048,2048\" Reference: Rayonix HS", "the # naming scheme imposed by the 'start_series_triggered' command that # point ot", "once after starting a series. self.trigger_signal_type = \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0, #", "= pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname) except: pass # Convert separators from DOS style", "and 9 of the state code tell whether the task status of \"read\"", "not self.connected: return True return ((self.state_code() & 0x00000020) != 0) def is_reading (self):", "e.g. \"//id14bxf/data\" in Windows maps to \"/net/id14bxf/data\" on Unix\"\"\" if not pathname: return", "== \"\": return makedirs(directory) def log_error(self,message): \"\"\"For error messages. Display the message and", "symblix link redirects is to create an image with # the specified name.", "server, not locally. 
If 'save_raw' is true (default: false), the image raw data", "& 0x00000300) != 0) def is_correcting (self): \"tells whether the chip is currently", "= persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None): \"\"\"name: used for IP address, in", "2 if self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() != \"acquiring series\":", "t = [] if (status & 0x0000000F) == 6: t+= [\"unavailable\"] if (status", "reading, not clearing)\" # \"acquire\" field is \"executing\" if not self.connected: return True", "not valid backgournd image. self.auto_bkg = True # Whether to save corrected or", "Y bin factor\") def read_bkg(self): \"\"\"Reads a fresh the backgound image, which is", "line by the commaand \"hsserver_lagacy\". The server understand the following commands: start -", "in sequence aquistion mode, cancel it. if not self.state() == \"idle\": self.abort() while", "Pullup', 'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value)", "\"\"\"Status information as string: idle,integating,reading,writing\"\"\" try: status = self.state_code() except: return \"\" #", "For triggred image acquiation # 0: the rising edge of the trigger initiates", "tell whether the task status of \"read\" # is either \"queued\" or \"executing\"", "size in bytes including headers bin_facor: 2,4,8,16\"\"\" image_size = 7680/bin_factor # MS340HS headersize", "the detector still requires 11 trigger pulses # to aquire 10 images. 
#", "0x00020000) != 0: t+= [\"writing\"] if (status & 0x00040000) != 0: t+= [\"write", "!= \"acquiring series\": sleep(0.05) if self.bulb_mode == 0 and not self.ignore_first_trigger: self.trigger() #", "the bin factor is changed. If the backgroud image does not have the", "This also acquires a background image, in case there is no valid background", "= win32wnet.WNetGetUniversalName(pathname) except: pass # Convert separators from DOS style to UNIX style.", "is currently being read out\" # bit 8 and 9 of the state", "same number of pixels as the last read image the correction as saving", "!= \"\\n\": message += \"\\n\" t = timestamp() file(self.logfile,\"a\").write(\"%s: %s\" % (t,message)) def", "Author: <NAME> Date created: 2013-09-20 Date last modified: 2018-06-101 \"\"\" __version__ = \"4.0.1\"", "the continuous clearing. In case the CCD readout is in progess, execution is", "of current the background image, e.g. \"2048,2048\" Reference: Rayonix HS detector manual 0.3e", "def is_integrating (self): \"tells whether the chip is integrating mode (not reading, not", "1 ms\"\"\" from datetime import datetime timestamp = str(datetime.now()) return timestamp[:-3] # omit", "image file is written in background as a pipelined operation. The function returns", "# naming scheme imposed by the 'start_series_triggered' command that # point ot the", "be overridden in subclass from logging import debug,info,warn,error import socket from time import", "!= 0: t+= [\"series error\"] state = \",\".join(t) return state def start(self,wait=True): \"\"\"Puts", "finish. while self.is_reading(): sleep(0.05) # Work-around for a bug where the detector remaingns", "self.bkg_valid(): self.read_bkg() def bkg_valid(self): \"\"\"Does detector software have a the backgound image for", "the local format to the format used on the MAR CCD compter. e.g.", "integer numbers, e.g. 
\"2,2\" get_size_bkg - reply is the number of pixels of", "not self.is_idle(): sleep(0.05) self.write(\"readout,1\") # read the CCD and stores the result as", "<NAME>'s sample remote control server program \"marccd_server_socket\" with TCP port number 2222. Usage", "self.read_bkg() def bkg_valid(self): \"\"\"Does detector software have a the backgound image for the", "except: pass makedirs(tempdir) for i in range(0,len(filenames)): link = tempdir+\"/%06d.rx\" % (i+1) if", "Date created: 2013-09-20 Date last modified: 2018-06-101 \"\"\" __version__ = \"4.0.1\" # default", "writable on the Rayonix computer. # E.g. user id 10660(xppopr) on \"xpp-daq\", versus", "information as integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\") if reply == \"\": return 0 try:", "a pipelined operation. The function returns immediately. The pathname of the file is", "# The detector will ignore an \"acquire_images_triggered\" command if not # in \"idle\"", "= not triggered, 1= triggered frame transfer, 2 = bulb mode, 3 =", "# to be specified, but uses auto-generated filenames instead. # As a work-araound", "an image with # the specified name. from os.path import dirname,relpath,islink,exists from os", "== 0: return True else: return False def is_integrating (self): \"tells whether the", "reply %r: %s\" % (reply,message)) return 0 # bit 8 and 9 of", "else \"\" # Try to expand a Windows drive letter to a UNC", "Remote control of the MAR CCD detector, using <NAME>'s sample remote control server", "Rayonix HS detector manual 0.3e Chapter 9: The Legacy Remote Mode for HS", "= \"\" for part in parts[4:]: path += part+\"/\" path = path.rstrip(\"/\") pathname", "while self.state() != \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a bin factor change it", "\"\"\"Reads the detector. 
If a filename is given, the image is saved as", "precise to 1 ms\"\"\" from datetime import datetime timestamp = str(datetime.now()) return timestamp[:-3]", "sys import stderr if len(message) == 0 or message[-1] != \"\\n\": message +=", "tcp_client import connected return connected(self.ip_address) online = connected def write(self,command): \"\"\"Sends a comman", "sleep(0.05) # Work-around for a bug where the detector remaingns in \"reading\" state", "60, 8: 75, 10: 120} bin_factor = self.bin_factor if bin_factor in readout_rate: read_time", "7: t+= [\"error\"] if (status & 0x0000000F) == 8: t+= [\"busy\"] if (status", "debug,info,warn,error import socket from time import sleep,time from thread import allocate_lock class Rayonix_Detector(object):", "\"\"\"Cancel series acquiation mode\"\"\" self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads the detector. If a filename", "self.write(\"readout,3\") self.last_read = time() def save_image(self,filename): \"\"\"Saves the last read image to a", "# in \"idle\" state. if not self.state() == \"idle\": self.abort() while self.state() !=", "computer filename_suffix: including the dot (.) number_field_width: number of digits for the filename", "height of the current background image in pixels. This value is important to", "pathname: return pathname end = \"/\" if pathname.endswith(\"/\") else \"\" # Try to", "task status of \"read\" # is either \"queued\" or \"executing\" return ((self.state_code() &", "image (after startup or binning changed). 
wait: The is a 0.2 s delay", "mode frame_trigger_type = 2 if self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state()", "reply\"\"\" self.log(\"query %r\" % command) from tcp_client import query return query(self.ip_address,command) def state_code(self):", "is integer number containing 6 4-bit fields bits 0-3: state: 0=idle,8=busy bits 4-7:", "the number of pixels of current the background image, e.g. \"2048,2048\" Reference: Rayonix", "self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire a series of images timed by an external", "sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0]) except: return def set_bin_factor(self,n):", "in self.state(): sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0]) except: return", "specified, but uses auto-generated filenames instead. # As a work-araound generated a series", "to the transcript.\"\"\" from sys import stderr if len(message) == 0 or message[-1]", "Rayonix_Detector(object): \"\"\"This is to remote control the MAR CCD detector Using remote protocol", "name error messages.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile) def", "image # if there is not valid backgournd image. self.auto_bkg = True #", "detector will ignore an \"acquire_images_triggered\" command if not # in \"idle\" state. 
if", "link = tempdir+\"/%06d.rx\" % (i+1) if islink(link) or exists(link): remove(link) try: pathname =", "the image.\" self.write(\"readout,3\") self.last_read = time() def save_image(self,filename): \"\"\"Saves the last read image", "is to create an image with # the specified name. from os.path import", "testing from pdb import pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self = rayonix_detector #", "(self): \"tells whether the chip is currently being read out\" # bit 8", "wait=False, do no wait for this to happen. \"\"\" ##t0 = time() #", "# omit microsconds def remote(pathname): \"\"\"This converts the pathname of a file on", "; share = parts[3] path = \"\" for part in parts[4:]: path +=", "= 0 # Keep track of when the detector was last read. self.last_read", "or \"executing\" return ((self.state_code() & 0x00000300) != 0) def is_correcting (self): \"tells whether", "& 0x00020000) != 0: t+= [\"writing\"] if (status & 0x00040000) != 0: t+=", "# By default verbose logging is enabled. Change when problem solved. logging =", "name = \"rayonix_detector\" from persistent_property import persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True)", "except: return 0 def filesize(self,bin_factor): \"\"\"Image file size in bytes including headers bin_facor:", "\"/\" if pathname.endswith(\"/\") else \"\" # Try to expand a Windows drive letter", "import exists from sys import stderr if exists(pathname) and not iswritable(pathname): try: chmod(pathname,0777)", "(status & 0x00020000) != 0: t+= [\"writing\"] if (status & 0x00040000) != 0:", "status = self.state_code() except: return \"\" # bit mask 0x00444440 masks out error", "a Windows drive letter to a UNC name. 
try: import win32wnet # Convert", "Detector Control Author: <NAME> Date created: 2013-09-20 Date last modified: 2018-06-101 \"\"\" __version__", "[\"dezinger error\"] if (status & 0x01000000) != 0: t+= [\"series queued\"] if (status", "mask 0x00444440 masks out error flags if (status & ~0x0444444F) == 0: return", "to %r: %s\" % (pathname,link,msg)) if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location", "def update_bkg(self): \"\"\"Updates the backgound image if needed, for instance after the server", "instead. # As a work-araound generated a series of symbilic link complying to", "if (status & 0x00000400) != 0: t+= [\"read error\"] if (status & 0x00001000)", "the previous image to finish. while self.is_reading(): sleep(0.05) # Work-around for a bug", "frame transfer mode. However, (as of # Jul 2014, version 0.3.10), the detector", "is given, the image is saved as a file. The image file is", "bulb mode, 3 = LCLS mode frame_trigger_type = 2 if self.bulb_mode else 1", "directory = common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\" try: rmtree(tempdir) except: pass makedirs(tempdir) for i", "and time()-t < 3: sleep (0.1) bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout X and Y", "- reads a new background image, no reply get_state - reply is integer", "The is a 0.2 s delay until te detectror enters \"integrating\" state, (maybe", "command if not # in \"idle\" state. 
if not self.state() == \"idle\": self.abort()", "frames per second as function of bin factor: readout_rate = {1: 2, 2:", "file system of the Rayonix computer\"\"\" # The detector will ignore an \"acquire_images_triggered\"", "import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series of images timed by", "(status & 0x00001000) != 0: t+= [\"correct queued\"] if (status & 0x00002000) !=", "X and Y bin factor\") def read_bkg(self): \"\"\"Reads a fresh the backgound image,", "state code tell whether the task status of \"correct\" # is either \"queued\"", "the chip is integrating mode (not reading, not clearing)\" # \"acquire\" field is", "[\"read error\"] if (status & 0x00001000) != 0: t+= [\"correct queued\"] if (status", "of # Jul 2014, version 0.3.10), the detector still requires 11 trigger pulses", "record every command and reply in /tmp/rayonix_detector.log self.verbose_logging = True @property def connected(self):", "- reply is two integer numbers, e.g. \"2,2\" get_size_bkg - reply is the", "protocol version 1\"\"\" name = \"rayonix_detector\" from persistent_property import persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\")", "set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value) while \"busy\" in self.state(): sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def", "< 3: sleep (0.1) bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout X and Y bin factor\")", "# For triggred image acquiation # 0: the rising edge of the trigger", "control computer, compared # to the beamline control computer, so directories created via", "integration mode by stopping the continuous clearing. In case the CCD readout is", "file will fail. 
At startup, the background image is empty and this value", "the specified name. from os.path import dirname,relpath,islink,exists from os import symlink,remove from shutil", "the filename sequence number, e.g. 6 for 'test000001.rx'\"\"\" # Make sure the directory", "with respect to %r: %s\" % (filenames[i],tempdir,msg)) pathname = filenames[i] try: symlink(pathname,link) except", "the server has been restarted or after the bin factor has been changed.", "try: import win32wnet # Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". pathname = pathname.replace(\"/\",\"\\\\\") pathname =", "persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None): \"\"\"name: used for IP address, in case", "the last readout is finished. This also acquires a background image, in case", "a command that generates a reply. Return the reply\"\"\" self.log(\"query %r\" % command)", "filename exists by create it, if necessary.\"\"\" if filename is None or filename", "the Remote Control control panel with the second parameter (\"Server command\" or \"Device", "from __future__ import with_statement \"\"\" Remote control of the MAR CCD detector, using", "to finish. 
while self.is_reading(): sleep(0.05) # Work-around for a bug where the detector", "% command) from tcp_client import query return query(self.ip_address,command) def state_code(self): \"\"\"Status information as", "and self.connected: sleep (0.05) def abort(self): \"\"\"Cancel series acquiation mode\"\"\" self.write(\"abort\") def readout(self,filename=None):", "def is_reading (self): \"tells whether the chip is currently being read out\" #", "self.ignore_first_trigger: # The detector software does not save to first image, which is", "a file on a network file server from the local format to the", "file(self.logfile,\"a\").write(\"%s: %s\" % (t,message)) def get_error_logfile(self): \"\"\"File name error messages.\"\"\" from tempfile import", "of teh given filename exists by create it, if necessary.\"\"\" if filename is", "acquires a background image, in case there is no valid background image (after", "the new # bin factor is read back. t = time() while self.get_bin_factor()", "0: t+= [\"read queued\"] if (status & 0x00000200) != 0: t+= [\"reading\"] if", "self = rayonix_detector # for debugging filenames = [\"/tmp/test_%03d.mccd\" % (i+1) for i", "command that generates a reply. 
Return the reply\"\"\" self.log(\"query %r\" % command) from", "path of %r with respect to %r: %s\" % (filenames[i],tempdir,msg)) pathname = filenames[i]", "image files for other applications from DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6):", "valid pathname on file system of the Rayonix computer\"\"\" # The detector will", "= False # For triggred image acquiation # 0: the rising edge of", "!= 0: t+= [\"series queued\"] if (status & 0x02000000) != 0: t+= [\"acquiring", "return gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile) def timestamp(): \"\"\"Current date and time as formatted", "= [] for pathname in filenames: for i in range(0,level): pathname = dirname(pathname)", "filenames: for i in range(0,level): pathname = dirname(pathname) dirnames += [pathname] if all([n", "len(filenames) == 0: return [] if len(filenames) == 1: return dirname(filenames[0]) for level", "filenames = [\"/tmp/test_%03d.mccd\" % (i+1) for i in range(0,10)] print('rayonix_detector.ip_address = %r' %", "0x0000000F) == 6: t+= [\"unavailable\"] if (status & 0x0000000F) == 7: t+= [\"error\"]", "import stderr if exists(pathname) and not iswritable(pathname): try: chmod(pathname,0777) except Exception,details: stderr.write(\"chmod: %r:", "the correction is applied. \"\"\" if not self.is_idle(): self.abort() while not self.is_idle(): sleep(0.05)", "False # For triggred image acquiation # 0: the rising edge of the", "self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a", "and height of the current background image in pixels. This value is important", "is to make the query method multi-thread safe. 
self.lock = allocate_lock() # If", "set to \"/home/marccdsource/servers/marccd_server_socket\", and the third parameter (\"Server Arguments\" or \"Personal Name\") set", "with 4096x4096 pixels is not used, because the point-spread function of the fiber", "field, which has only 0=idle and 8=busy. writefile,<filename>,1 - Save the last read", "detector software have a the backgound image for the current bin mode, which", "The Legacy Remote Mode for HS Detector Control Author: <NAME> Date created: 2013-09-20", "the detector. If a filename is given, the image is saved as a", "readout_rate: read_time = 1.0/readout_rate[bin_factor] else: read_time = nan return read_time*safetyFactor def make_directory(self,filename): \"\"\"Make", "the last read image, no reply set_bin,8,8 - Use 512x512-pixel bin mode, no", "of filenames # to be specified, but uses auto-generated filenames instead. # As", "changed). wait: The is a 0.2 s delay until te detectror enters \"integrating\"", "self.make_directory(filename) if not self.save_raw: if filename != None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else: if", "series. self.trigger_signal_type = \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width # 0 =", "in case there is more than one detector\"\"\" if name is not None:", "& 0x00010000) != 0: t+= [\"write queued\"] if (status & 0x00020000) != 0:", "also added to the transcript.\"\"\" from sys import stderr if len(message) == 0", "image, when using triggered frame transfer mode. However, (as of # Jul 2014,", "dirnames]): break pathname = filenames[0] for i in range(0,level): pathname = dirname(pathname) return", "the command line by the commaand \"hsserver_lagacy\". 
The server understand the following commands:", "readout is in progess, execution is delayed until the last readout is finished.", "no reply set_bin,2,2 - Use full readout mode (2048x2048 pixels), no reply (The", "int(self.query(\"get_bin\").split(\",\")[0]) except: return def set_bin_factor(self,n): if self.bin_factor == n: return if not self.state()", "exists(pathname) and not iswritable(pathname): try: chmod(pathname,0777) except Exception,details: stderr.write(\"chmod: %r: %r\" % (pathname,details))", "background image. Otherwise, the image # correction will fail. if self.auto_bkg: self.update_bkg() self.write(\"start\")", "where the detector remaingns in \"reading\" state # forever. <NAME> 27 Mar 2014", "to be specified, but uses auto-generated filenames instead. # As a work-araound generated", "return status def is_idle (self): try: status = self.state_code() except: return True #", "system of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read = time() def", "an image the symblix link redirects is to create an image with #", "system of the server, not locally. If 'save_raw' is true (default: false), the", "self.is_idle(): self.abort() while not self.is_idle(): sleep(0.05) self.write(\"readout,1\") # read the CCD and stores", "\"xpp-daq\", versus user id 500(hsuser) # on \"con-ics-xpp-rayonix\" from os import makedirs,umask,chmod from", "detector once after starting a series. 
self.trigger_signal_type = \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0,", "flags if (status & ~0x0444444F) == 0: return True else: return False def", "in case there is no valid background image (after startup or binning changed).", "the current bin mode, which is substracted from every image after readout before", "logging is enabled.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile) def", "time() def image_size(self): \"\"\"Width and height of the image in pixels at the", "not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves the last read image without", "1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() != \"acquiring series\": sleep(0.05) if self.bulb_mode ==", "acquire bits 8-11: read bits 12-15: correct bits 16-19: write bits 20-23: dezinger", "return query(self.ip_address,command) def state_code(self): \"\"\"Status information as integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\") if reply", "% (t,message)) def get_error_logfile(self): \"\"\"File name error messages.\"\"\" from tempfile import gettempdir return", "return timestamp[:-3] # omit microsconds def remote(pathname): \"\"\"This converts the pathname of a", "a workaround for promblem caused by the Rayonix software running # under a", "return gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile) def get_logfile(self): \"\"\"File name for transcript if verbose", "readout_raw(self): \"Reads the detector out without correcting and displaying the image.\" self.write(\"readout,3\") self.last_read", "\"\": return from os.path import dirname directory = dirname(filename) if directory == \"\":", "for instance 
after the server has been restarted or after the bin factor", "!= \"idle\": sleep(0.05) # Need a valid background image before starting acquisition. if", "import stderr if len(message) == 0 or message[-1] != \"\\n\": message += \"\\n\"", "return if not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n))", "75, 10: 120} bin_factor = self.bin_factor if bin_factor in readout_rate: read_time = 1.0/readout_rate[bin_factor]", "in Windows maps to \"/net/id14bxf/data\" on Unix\"\"\" if not pathname: return pathname end", "def query(self,command): \"\"\"Send a command that generates a reply. Return the reply\"\"\" self.log(\"query", "6: t+= [\"unavailable\"] if (status & 0x0000000F) == 7: t+= [\"error\"] if (status", "is interpreted in file system of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename))", "before the new # bin factor is read back. t = time() while", "datetime timestamp = str(datetime.now()) return timestamp[:-3] # omit microsconds def remote(pathname): \"\"\"This converts", "automatically reads a background image # if there is not valid backgournd image.", "0 and not self.ignore_first_trigger: # The detector software does not save to first", "(status & 0x00200000) != 0: t+= [\"dezingering\"] if (status & 0x00400000) != 0:", "format used on the MAR CCD compter. e.g. \"//id14bxf/data\" in Windows maps to", "0 or message[-1] != \"\\n\": message += \"\\n\" t = timestamp() stderr.write(\"%s: %s:", "%s\" % (filenames[i],tempdir,msg)) pathname = filenames[i] try: symlink(pathname,link) except Exception,msg: error(\"Cannot create of", "pixels as the last read image the correction as saving to file will", "or \"Personal Name\") set to \"2222\". Or, alternatively from the command line by", "0x00000010) != 0: t+= [\"integrate queued\"] if (status & 0x00000020) != 0: t+=", "interpreted in file system of the server, not locally. 
\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def", "the backgound image if needed, for instance after the server has been restarted", "1\"\"\" name = \"rayonix_detector\" from persistent_property import persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger =", "image # correction will fail. if self.auto_bkg: self.update_bkg() self.write(\"start\") if not wait: return", "the real filenames. When the rayonix softawre tries to save # an image", "from tempfile import gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile) def get_logfile(self): \"\"\"File name", "# Jul 2014, version 0.3.10), the detector still requires 11 trigger pulses #", "not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After", "if (status & 0x04000000) != 0: t+= [\"series error\"] state = \",\".join(t) return", "does not work with protocol v1 (timeout) \"\"\"Width and height of the current", "% (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() != \"acquiring series\": sleep(0.05) if self.bulb_mode == 0 and", "if (status & 0x00004000) != 0: t+= [\"correct error\"] if (status & 0x00010000)", "= common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\" try: rmtree(tempdir) except: pass makedirs(tempdir) for i in", "saves it to a file no reply readout,1 - reads a new background", "= \"rayonix_detector\" from persistent_property import persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True) def", "bad # image, when using triggered frame transfer mode. 
However, (as of #", "and Y bin factor\") def read_bkg(self): \"\"\"Reads a fresh the backgound image, which", "function returns immediately. The pathname of the file is interpreted in file system", "fail. if self.auto_bkg: self.update_bkg() self.write(\"start\") if not wait: return while not self.is_integrating() and", "level in range(1,4): dirnames = [] for pathname in filenames: for i in", "= filenames[0] for i in range(0,level): pathname = dirname(pathname) return pathname rayonix_detector =", "sequence aquistion mode, cancel it. if not self.state() == \"idle\": self.abort() while self.state()", "the background image is empty and this value is 0. \"\"\" try: return", "location of image files for other applications from DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def", "rayonix softawre tries to save # an image the symblix link redirects is", "filenames[0] for i in range(0,level): pathname = dirname(pathname) return pathname rayonix_detector = Rayonix_Detector()", "from the command line by the commaand \"hsserver_lagacy\". The server understand the following", "execution is delayed until the last readout is finished. 
This also acquires a", "return pathname end = \"/\" if pathname.endswith(\"/\") else \"\" # Try to expand", "integration mode, no reply readout,0,filename - Reads out the detector, corrects the image", "[\"correct queued\"] if (status & 0x00002000) != 0: t+= [\"correcting\"] if (status &", "displaying the image.\" self.write(\"readout,3\") self.last_read = time() def save_image(self,filename): \"\"\"Saves the last read", "If the backgroud image does not have the the same number of pixels", "reply readout,0,filename - Reads out the detector, corrects the image and saves it", "and not iswritable(pathname): try: chmod(pathname,0777) except Exception,details: stderr.write(\"chmod: %r: %r\" % (pathname,details)) if", "example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is started from the MarCCD software from", "(status & 0x00400000) != 0: t+= [\"dezinger error\"] if (status & 0x01000000) !=", "start - Puts the CCD to integration mode, no reply readout,0,filename - Reads", "iswritable(pathname): \"\"\"Is file or folder writable?\"\"\" from os import access,W_OK return access(pathname,W_OK) def", "if bin_factor in readout_rate: read_time = 1.0/readout_rate[bin_factor] else: read_time = nan return read_time*safetyFactor", "# bit mask 0x00444440 masks out error flags if (status & ~0x0444444F) ==", "whether the task status of \"read\" # is either \"queued\" or \"executing\" return", "acquiation mode\"\"\" self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads the detector. 
If a filename is given,", "!= 0: t+= [\"writing\"] if (status & 0x00040000) != 0: t+= [\"write error\"]", "!= 0: t+= [\"dezingering\"] if (status & 0x00400000) != 0: t+= [\"dezinger error\"]", "0x00000400) != 0: t+= [\"read error\"] if (status & 0x00001000) != 0: t+=", "if self.auto_bkg: self.update_bkg() self.write(\"start\") if not wait: return while not self.is_integrating() and self.connected:", "the directory is world-writable\"\"\" # This is a workaround for promblem caused by", "allocate_lock() # If this flag is set 'start' automatically reads a background image", "true (default: false), the image raw data is saved rather than the correct", "saving to file will fail. At startup, the background image is empty and", "if not pathname: return pathname end = \"/\" if pathname.endswith(\"/\") else \"\" #", "number_field_width: number of digits for the filename sequence number, e.g. 6 for 'test000001.rx'\"\"\"", "it takes about 2 s before the new # bin factor is read", "The server understand the following commands: start - Puts the CCD to integration", "interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width # 0 = not triggered, 1= triggered frame transfer, 2", "logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self = rayonix_detector # for debugging filenames = [\"/tmp/test_%03d.mccd\" %", "[\"integrating\"] if (status & 0x00000040) != 0: t+= [\"integrate error\"] if (status &", "path = \"\" for part in parts[4:]: path += part+\"/\" path = path.rstrip(\"/\")", "for pathname in filenames: for i in range(0,level): pathname = dirname(pathname) dirnames +=", "\"2048,2048\" Reference: Rayonix HS detector manual 0.3e Chapter 9: The Legacy Remote Mode", "no reply get_state - reply is integer number containing 6 4-bit fields bits", "[\"correct error\"] if (status & 0x00010000) != 0: t+= [\"write queued\"] if (status", "== \"\": return from os.path import dirname directory = 
dirname(filename) if directory ==", "n: return if not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05)", "remote(filename_base) # If already in sequence aquistion mode, cancel it. if not self.state()", "10: 120} bin_factor = self.bin_factor if bin_factor in readout_rate: read_time = 1.0/readout_rate[bin_factor] else:", "messages. Display the message and append it to the error log file. If", "contains a 4-bit code, with the following meaning: 0=idle, 1=queued, 2=executing, 4=error The", "return [] if len(filenames) == 1: return dirname(filenames[0]) for level in range(1,4): dirnames", "does not save to first image, which is a bad # image, when", "[\"dezingering\"] if (status & 0x00400000) != 0: t+= [\"dezinger error\"] if (status &", "file. If verbose logging is enabled, it is also added to the transcript.\"\"\"", "UNC name. try: import win32wnet # Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". pathname = pathname.replace(\"/\",\"\\\\\")", "detector, corrects the image and saves it to a file no reply readout,1", "= directory+\"/.rayonix_temp\" try: rmtree(tempdir) except: pass makedirs(tempdir) for i in range(0,len(filenames)): link =", "the result as background while not self.is_idle(): sleep(0.05) self.last_read = time() def image_size(self):", "enabled, it is also added to the transcript.\"\"\" from sys import stderr if", "self.last_read = time() def image_size(self): \"\"\"Width and height of the image in pixels", "!= 0: t+= [\"reading\"] if (status & 0x00000400) != 0: t+= [\"read error\"]", "reply. 
Return the reply\"\"\" self.log(\"query %r\" % command) from tcp_client import query return", "self.connected: return True return ((self.state_code() & 0x00000020) != 0) def is_reading (self): \"tells", "# Verbose logging: record every command and reply in /tmp/rayonix_detector.log self.verbose_logging = True", "the task status of \"read\" # is either \"queued\" or \"executing\" if (status", "correction is applied.\"\"\" return self.bkg_image_size() == self.image_size() # By default verbose logging is", "the backgound image for the current bin mode, which is substracted from every", "int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0 def update_bkg(self): \"\"\"Updates the backgound image if needed, for", "but uses auto-generated filenames instead. # As a work-araound generated a series of", "converts the pathname of a file on a network file server from the", "on \"con-ics-xpp-rayonix\" from os import makedirs,umask,chmod from os.path import exists from sys import", "pathname rayonix_detector = Rayonix_Detector() if __name__ == \"__main__\": # for testing from pdb", "of \"read\" # is either \"queued\" or \"executing\" return ((self.state_code() & 0x00000300) !=", "when problem solved. logging = False @property def readout_time(self): \"\"\"Estimated readout time in", "does not generate a reply\"\"\" from tcp_client import write write(self.ip_address,command) def query(self,command): \"\"\"Send", "pathname def makedirs(pathname): \"\"\"Create a directory, or make sure that the directory is", "0x00002000) != 0: t+= [\"correcting\"] if (status & 0x00004000) != 0: t+= [\"correct", "saves the uncorrected image as a file. 
The image file is written in", "os import symlink,remove from shutil import rmtree directory = common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\"", "of %r to %r: %s\" % (pathname,link,msg)) if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) #", "until the last readout is finished. This also acquires a background image, in", "headersize+image_nbytes return filesize def bkg_image_size(self): # does not work with protocol v1 (timeout)", "set_bin,8,8 - Use 512x512-pixel bin mode, no reply set_bin,2,2 - Use full readout", "has been changed. \"\"\" if not self.bkg_valid(): self.read_bkg() def bkg_valid(self): \"\"\"Does detector software", "Arguments\" or \"Personal Name\") set to \"2222\". Or, alternatively from the command line", "import gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile) def get_logfile(self): \"\"\"File name for transcript", "is not None: self.name = name self.timeout = 1.0 # This is to", "dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series of images timed by an exteranal", "self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else: if filename != None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while not", "image_size = 7680/bin_factor # MS340HS headersize = 4096 image_nbytes = 2*image_size**2 filesize =", "Rayonix_Detector() if __name__ == \"__main__\": # for testing from pdb import pm import", "t+= [\"read error\"] if (status & 0x00001000) != 0: t+= [\"correct queued\"] if", "allow a list of filenames # to be specified, but uses auto-generated filenames", "information as string: idle,integating,reading,writing\"\"\" try: status = self.state_code() except: return \"\" # bit", "be valid pathname on file 
system of the Rayonix computer\"\"\" # The detector", "queued\"] if (status & 0x00000200) != 0: t+= [\"reading\"] if (status & 0x00000400)", "system of the Rayonix computer filename_suffix: including the dot (.) number_field_width: number of", "range(1,4): dirnames = [] for pathname in filenames: for i in range(0,level): pathname", "if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location of image files for other", "dirname(pathname) return pathname rayonix_detector = Rayonix_Detector() if __name__ == \"__main__\": # for testing", "(status & 0x00100000) != 0: t+= [\"dezinger queued\"] if (status & 0x00200000) !=", "detector. If a filename is given, the image is saved as a file.", "or raw images. self.save_raw = False # For triggred image acquiation # 0:", "a series. self.trigger_signal_type = \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width # 0", "not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire a series of images timed", "omit microsconds def remote(pathname): \"\"\"This converts the pathname of a file on a", "time import sleep,time from thread import allocate_lock class Rayonix_Detector(object): \"\"\"This is to remote", "bin_factor in readout_rate: read_time = 1.0/readout_rate[bin_factor] else: read_time = nan return read_time*safetyFactor def", "time()-t0 > 2.0: self.abort() # Make sure there is a valid background image.", "while self.state() != \"acquiring series\": sleep(0.05) if self.bulb_mode == 0 and not self.ignore_first_trigger:", "+= part+\"/\" path = path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end): pathname +=", "than the correct image. 
\"\"\" if filename != None: self.make_directory(filename) if not self.save_raw:", "code tell whether the task status of \"correct\" # is either \"queued\" or", "server, not locally. \"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read = time() def readout_raw(self): \"Reads the", "state, (maybe for the clearing to stop?) When wait=False, do no wait for", "solved. logging = False @property def readout_time(self): \"\"\"Estimated readout time in seconds. Changes", "all([n == dirnames[0] for n in dirnames]): break pathname = filenames[0] for i", "mode, 3 = LCLS mode frame_trigger_type = 2 if self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\"", "\"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup', 'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value):", "machine might not be writable on the Rayonix computer. # E.g. user id", "pathname in filenames: for i in range(0,level): pathname = dirname(pathname) dirnames += [pathname]", "i in range(0,level): pathname = dirname(pathname) return pathname rayonix_detector = Rayonix_Detector() if __name__", "import pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self = rayonix_detector # for debugging filenames", "might not be writable on the Rayonix computer. # E.g. 
user id 10660(xppopr)", "try: chmod(pathname,0777) except Exception,details: stderr.write(\"chmod: %r: %r\" % (pathname,details)) if not exists(pathname): umask(0000)", "the image in pixels at the current bin mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0]) except:", "if filename != None: self.make_directory(filename) if not self.save_raw: if filename != None: self.write(\"readout,0,\"+remote(filename))", "bits 8-11: read bits 12-15: correct bits 16-19: write bits 20-23: dezinger Each", "as the last read image the correction as saving to file will fail.", "if filename != None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else: if filename != None: self.write(\"readout,3,\"+remote(filename))", "is either \"queued\" or \"executing\" if (status & 0x00000300) != 0: self.last_read =", "1x1 bin mode with 4096x4096 pixels is not used, because the point-spread function", "# Workaround: Software-trigger the detector once after starting a series. self.trigger_signal_type = \"Software\"", "# is either \"queued\" or \"executing\" return ((self.state_code() & 0x00000300) != 0) def", "images timed by an external hardware trigger signal. filenames: list of absolute pathnames.", "The detector will ignore an \"acquire_images_triggered\" command if not # in \"idle\" state.", "start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series of images timed by an exteranal hardware trigger", "pathname += end return pathname def makedirs(pathname): \"\"\"Create a directory, or make sure", "self.last_read = time() def readout_raw(self): \"Reads the detector out without correcting and displaying", "which is substracted from every image after readout before the correction is applied.", "\"hsserver_lagacy\". 
The server understand the following commands: start - Puts the CCD to", "# on \"con-ics-xpp-rayonix\" from os import makedirs,umask,chmod from os.path import exists from sys", "True return ((self.state_code() & 0x00000020) != 0) def is_reading (self): \"tells whether the", "self.state_code() except: return True # bit mask 0x00444440 masks out error flags if", "!= 0) def is_correcting (self): \"tells whether the chip is currently being read", "directory == \"\": return makedirs(directory) def log_error(self,message): \"\"\"For error messages. Display the message", "if pathname.find(\"//\") == 0: # //server/share/directory/file parts = pathname.split(\"/\") if len(parts) >= 4:", "the background image, e.g. \"2048,2048\" Reference: Rayonix HS detector manual 0.3e Chapter 9:", "of the trigger initiates frame transfer/readout # 1: rising edge starts acquisition, #", "external hardware trigger signal. filenames: list of absolute pathnames. Directory part must be", "property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0]) except: return def set_bin_factor(self,n): if self.bin_factor ==", "pdb import pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self = rayonix_detector # for debugging", "self.log(message) def log(self,message): \"\"\"For non-critical messages. Append the message to the transcript, if", "not used, because the point-spread function of the fiber optic taper is large", "2, 2: 10, 3: 15, 4: 25, 5: 40, 6: 60, 8: 75,", "!= 0: t+= [\"write error\"] if (status & 0x00100000) != 0: t+= [\"dezinger", "persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None): \"\"\"name: used for IP address, in case there is more", "(timeout) \"\"\"Width and height of the current background image in pixels. This value", "(suppressed) image readout to complete. 
sleep(self.readout_time) self.trigger_signal_type = \"Opto\" def trigger(self): \"\"\"Software-trigger the", "image_size(self): \"\"\"Width and height of the image in pixels at the current bin", "as background while not self.is_idle(): sleep(0.05) self.last_read = time() def image_size(self): \"\"\"Width and", "empty and this value is 0. \"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0", "The image file is written in background as a pipelined operation. The function", "== 0: return [] if len(filenames) == 1: return dirname(filenames[0]) for level in", "value is important to know if the bin factor is changed. If the", "= property(get_bin_factor,set_bin_factor, doc=\"Readout X and Y bin factor\") def read_bkg(self): \"\"\"Reads a fresh", "image as a file. The image file is written in background as a", "last read image without spatial and uniformity correction to a file. The pathname", "\"\"\"Send a command that generates a reply. Return the reply\"\"\" self.log(\"query %r\" %", "as a file. The image file is written in background as a pipelined", "control computer, so directories created via NFS on the # control machine might", "factor is changed. If the backgroud image does not have the the same", "def log_error(self,message): \"\"\"For error messages. Display the message and append it to the", "0: t+= [\"reading\"] if (status & 0x00000400) != 0: t+= [\"read error\"] if", "the detector into integration mode by stopping the continuous clearing. In case the", "2013-09-20 Date last modified: 2018-06-101 \"\"\" __version__ = \"4.0.1\" # default name \"rayonix_detector\"", "self.trigger_signal_type = \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width # 0 = not", "valid background image (after startup or binning changed). 
wait: The is a 0.2", "'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value) while", "task status of \"read\" # is either \"queued\" or \"executing\" if (status &", "file size in bytes including headers bin_facor: 2,4,8,16\"\"\" image_size = 7680/bin_factor # MS340HS", "every image after readout before the correction is applied.\"\"\" return self.bkg_image_size() == self.image_size()", "image to a file. The pathname of the file is interpreted in file", "is interpreted in file system of the server, not locally. If 'save_raw' is", "from os.path import dirname,relpath,islink,exists from os import symlink,remove from shutil import rmtree directory", "formatted ASCCI text, precise to 1 ms\"\"\" from datetime import datetime timestamp =", "self.is_reading(): sleep(0.05) # Work-around for a bug where the detector remaingns in \"reading\"", "(status & 0x01000000) != 0: t+= [\"series queued\"] if (status & 0x02000000) !=", "(pathname,details)) if not exists(pathname): umask(0000) try: makedirs(pathname) except Exception,details: stderr.write(\"makedirs: %r: %r\" %", "letter to a UNC name. try: import win32wnet # Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\".", "iswritable(pathname): try: chmod(pathname,0777) except Exception,details: stderr.write(\"chmod: %r: %r\" % (pathname,details)) if not exists(pathname):", "hardware trigger signal. filenames: list of absolute pathnames. Directory part must be valid", "of a file on a network file server from the local format to", "write bits 20-23: dezinger Each filed contains a 4-bit code, with the following", "((self.state_code() & 0x00000020) != 0) def is_reading (self): \"tells whether the chip is", "the server, not locally. 
\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves the last read", "headersize = 4096 image_nbytes = 2*image_size**2 filesize = headersize+image_nbytes return filesize def bkg_image_size(self):", "# default name \"rayonix_detector\" may be overridden in subclass from logging import debug,info,warn,error", "\"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # Need a valid background image", "state def start(self,wait=True): \"\"\"Puts the detector into integration mode by stopping the continuous", "for HS Detector Control Author: <NAME> Date created: 2013-09-20 Date last modified: 2018-06-101", "as function of bin factor: readout_rate = {1: 2, 2: 10, 3: 15,", "if not self.verbose_logging: return if len(message) == 0 or message[-1] != \"\\n\": message", "s before the new # bin factor is read back. t = time()", "Exception,msg: error(\"Relative path of %r with respect to %r: %s\" % (filenames[i],tempdir,msg)) pathname", "will ignore an \"acquire_images_triggered\" command if not # in \"idle\" state. if not", "[] if len(filenames) == 1: return dirname(filenames[0]) for level in range(1,4): dirnames =", "%r\" % (pathname,details)) if not exists(pathname): umask(0000) try: makedirs(pathname) except Exception,details: stderr.write(\"makedirs: %r:", "def get_logfile(self): \"\"\"File name for transcript if verbose logging is enabled.\"\"\" from tempfile", "== 6: t+= [\"unavailable\"] if (status & 0x0000000F) == 7: t+= [\"error\"] if", "fail. At startup, the background image is empty and this value is 0.", "def readout_raw(self): \"Reads the detector out without correcting and displaying the image.\" self.write(\"readout,3\")", "multi-thread safe. 
self.lock = allocate_lock() # If this flag is set 'start' automatically", "full readout mode (2048x2048 pixels), no reply (The 1x1 bin mode with 4096x4096", "try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0 def update_bkg(self): \"\"\"Updates the backgound image if", "verbose logging is enabled.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile)", "error flags if (status & ~0x0444444F) == 0: return True else: return False", "mode\"\"\" self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads the detector. If a filename is given, the", "# Readout rate in frames per second as function of bin factor: readout_rate", "acquisition. if self.auto_bkg: self.update_bkg() if self.bulb_mode == 0 and not self.ignore_first_trigger: # The", "default verbose logging is enabled. Change when problem solved. logging = False @property", "== dirnames[0] for n in dirnames]): break pathname = filenames[0] for i in", "code, with the following meaning: 0=idle, 1=queued, 2=executing, 4=error The exception is the", "correction to a file. The pathname of the file is interpreted in file", "dirname,relpath,islink,exists from os import symlink,remove from shutil import rmtree directory = common_topdir(filenames) tempdir", "the image and saves it to a file no reply readout,1 - reads", "error\"] state = \",\".join(t) return state def start(self,wait=True): \"\"\"Puts the detector into integration", "trigger signal. filenames: list of absolute pathnames. 
Directory part must be valid pathname", "== \"__main__\": # for testing from pdb import pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\")", "Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value) while \"busy\" in self.state():", "def readout(self,filename=None): \"\"\"Reads the detector. If a filename is given, the image is", "!= 0: t+= [\"integrating\"] if (status & 0x00000040) != 0: t+= [\"integrate error\"]", "is empty and this value is 0. \"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return", "while not self.is_idle(): sleep(0.05) self.write(\"readout,1\") # read the CCD and stores the result", "path += part+\"/\" path = path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end): pathname", "the correct image. \"\"\" if filename != None: self.make_directory(filename) if not self.save_raw: if", "def __init__(self,name=None): \"\"\"name: used for IP address, in case there is more than", "0x01000000) != 0: t+= [\"series queued\"] if (status & 0x02000000) != 0: t+=", "timed by an external hardware trigger signal. filenames: list of absolute pathnames. Directory", "readout,0,filename - Reads out the detector, corrects the image and saves it to", "(2048x2048 pixels), no reply (The 1x1 bin mode with 4096x4096 pixels is not", "if (status & 0x01000000) != 0: t+= [\"series queued\"] if (status & 0x02000000)", "in file system of the server, not locally. 
\"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read =", "%r: %s\" % (reply,message)) return 0 # bit 8 and 9 of the", "property(get_bin_factor,set_bin_factor, doc=\"Readout X and Y bin factor\") def read_bkg(self): \"\"\"Reads a fresh the", "self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() != \"acquiring series\": sleep(0.05) if self.bulb_mode == 0", "flags if (status & ~0x0444444F) == 0: return \"idle\" t = [] if", "case the CCD readout is in progess, execution is delayed until the last", "# start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width # 0 = not triggered, 1= triggered", "sure the directory to write the image to exists. from os.path import dirname", "debugging filenames = [\"/tmp/test_%03d.mccd\" % (i+1) for i in range(0,10)] print('rayonix_detector.ip_address = %r'", "from os.path import dirname directory = dirname(filename_base) makedirs(directory) filename_base = remote(filename_base) # If", "8: t+= [\"busy\"] if (status & 0x00000010) != 0: t+= [\"integrate queued\"] if", "directory = dirname(filename_base) makedirs(directory) filename_base = remote(filename_base) # If already in sequence aquistion", "\"Reads the detector out without correcting and displaying the image.\" self.write(\"readout,3\") self.last_read =", "a file. 
The pathname of the file is interpreted in file system of", "the Rayonix software running # under a different user id on the Rayonix", "transcript if verbose logging is enabled.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile", "of the file is interpreted in file system of the server, not locally.", "before the correction is applied.\"\"\" return self.bkg_image_size() == self.image_size() # By default verbose", "pathname.find(\"//\") == 0: # //server/share/directory/file parts = pathname.split(\"/\") if len(parts) >= 4: server", "[\"integrate queued\"] if (status & 0x00000020) != 0: t+= [\"integrating\"] if (status &", "True # Whether to save corrected or raw images. self.save_raw = False #", "!= 0: t+= [\"read error\"] if (status & 0x00001000) != 0: t+= [\"correct", "== self.image_size() # By default verbose logging is enabled. Change when problem solved.", "of the state code tell whether the task status of \"read\" # is", "including headers bin_facor: 2,4,8,16\"\"\" image_size = 7680/bin_factor # MS340HS headersize = 4096 image_nbytes", "chip is integrating mode (not reading, not clearing)\" # \"acquire\" field is \"executing\"", "gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile) def timestamp(): \"\"\"Current date and time as", "in file system of the server, not locally. 
\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename):", "self.write(\"set_trigger_signal_type,%s\" % value) while \"busy\" in self.state(): sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self):", "is integrating mode (not reading, not clearing)\" # \"acquire\" field is \"executing\" if", "thread import allocate_lock class Rayonix_Detector(object): \"\"\"This is to remote control the MAR CCD", "(status & 0x00004000) != 0: t+= [\"correct error\"] if (status & 0x00010000) !=", "of pixels of current the background image, e.g. \"2048,2048\" Reference: Rayonix HS detector", "control panel with the second parameter (\"Server command\" or \"Device Database Server\") set", "allocate_lock class Rayonix_Detector(object): \"\"\"This is to remote control the MAR CCD detector Using", "# filename_base,filename_suffix,number_field_width # 0 = not triggered, 1= triggered frame transfer, 2 =", "except: return 0 def update_bkg(self): \"\"\"Updates the backgound image if needed, for instance", "is_integrating (self): \"tells whether the chip is integrating mode (not reading, not clearing)\"", "of digits for the filename sequence number, e.g. 
6 for 'test000001.rx'\"\"\" # Make", "!= 0: self.last_read = time() return status def is_idle (self): try: status =", "# interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width # 0 = not triggered, 1= triggered frame transfer,", "query(self.ip_address,command) def state_code(self): \"\"\"Status information as integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\") if reply ==", "!= None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else: if filename != None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\")", "from tcp_client import query return query(self.ip_address,command) def state_code(self): \"\"\"Status information as integer\"\"\" reply", "directory+\"/.rayonix_temp\" try: rmtree(tempdir) except: pass makedirs(tempdir) for i in range(0,len(filenames)): link = tempdir+\"/%06d.rx\"", "triggred image acquiation # 0: the rising edge of the trigger initiates frame", "series\": sleep(0.05) if self.bulb_mode == 0 and not self.ignore_first_trigger: self.trigger() # Wait for", "& 0x00002000) != 0: t+= [\"correcting\"] if (status & 0x00004000) != 0: t+=", "bits 0-3: state: 0=idle,8=busy bits 4-7: acquire bits 8-11: read bits 12-15: correct", "(status & 0x0000000F) == 8: t+= [\"busy\"] if (status & 0x00000010) != 0:", "exists by create it, if necessary.\"\"\" if filename is None or filename ==", "error\"] if (status & 0x00100000) != 0: t+= [\"dezinger queued\"] if (status &", "time() def readout_and_save_raw(self,filename): \"\"\"Reads the detector and saves the uncorrected image as a", "if len(message) == 0 or message[-1] != \"\\n\": message += \"\\n\" t =", "command line by the commaand \"hsserver_lagacy\". 
The server understand the following commands: start", "to the transcript, if verbose logging is enabled.\"\"\" if not self.verbose_logging: return if", "import makedirs,umask,chmod from os.path import exists from sys import stderr if exists(pathname) and", "may be overridden in subclass from logging import debug,info,warn,error import socket from time", "filename is None or filename == \"\": return from os.path import dirname directory", "read back. t = time() while self.get_bin_factor() != n and time()-t < 3:", "def state(self): \"\"\"Status information as string: idle,integating,reading,writing\"\"\" try: status = self.state_code() except: return", "messages.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile) def get_logfile(self): \"\"\"File", "0: self.last_read = time() return status def is_idle (self): try: status = self.state_code()", "== \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a bin", "to expand a Windows drive letter to a UNC name. try: import win32wnet", "to know if the bin factor is changed. If the backgroud image does", "makedirs,umask,chmod from os.path import exists from sys import stderr if exists(pathname) and not", "if pathname.endswith(\"/\") else \"\" # Try to expand a Windows drive letter to", "the detector and saves the uncorrected image as a file. The image file", "image, which is substracted from every image after readout before the correction is", "instance after the server has been restarted or after the bin factor has", "the trigger initiates frame transfer/readout # 1: rising edge starts acquisition, # falling", "read image without spatial and uniformity correction to a file. 
The pathname of", "remote(pathname): \"\"\"This converts the pathname of a file on a network file server", "Changes with 'bin_factor'.\"\"\" safetyFactor = 1 from numpy import nan # Readout rate", "set_bin,2,2 - Use full readout mode (2048x2048 pixels), no reply (The 1x1 bin", "Workaround: Software-trigger the detector once after starting a series. self.trigger_signal_type = \"Software\" #", "self.ignore_first_trigger: self.trigger() # Wait for the first (suppressed) image readout to complete. sleep(self.readout_time)", "large compared to the pixel size) get_bin - reply is two integer numbers,", "for level in range(1,4): dirnames = [] for pathname in filenames: for i", "not self.save_raw: if filename != None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else: if filename !=", "self.is_idle(): sleep(0.05) self.write(\"readout,1\") # read the CCD and stores the result as background", "HS detector manual 0.3e Chapter 9: The Legacy Remote Mode for HS Detector", "return 0 try: status = int(eval(reply)) except Exception,message: self.log_error(\"command 'get_state' generated bad reply", "def abort(self): \"\"\"Cancel series acquiation mode\"\"\" self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads the detector. If", "to \"/net/id14bxf/data\" on Unix\"\"\" if not pathname: return pathname end = \"/\" if", "from tempfile import gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile) def timestamp(): \"\"\"Current date", "= 1 from numpy import nan # Readout rate in frames per second", "the server, not locally. If 'save_raw' is true (default: false), the image raw", "spatial and uniformity correction to a file. The pathname of the file is", "e.g. 
\"2,2\" get_size_bkg - reply is the number of pixels of current the", "& 0x00000200) != 0: t+= [\"reading\"] if (status & 0x00000400) != 0: t+=", "0: t+= [\"series queued\"] if (status & 0x02000000) != 0: t+= [\"acquiring series\"]", "from os.path import exists from sys import stderr if exists(pathname) and not iswritable(pathname):", "= persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None): \"\"\"name: used for IP address, in case there is", "from datetime import datetime timestamp = str(datetime.now()) return timestamp[:-3] # omit microsconds def", "if (status & 0x0000000F) == 8: t+= [\"busy\"] if (status & 0x00000010) !=", "name self.timeout = 1.0 # This is to make the query method multi-thread", "dot (.) number_field_width: number of digits for the filename sequence number, e.g. 6", "self.is_reading(): sleep(0.05) self.last_read = time() def readout_and_save_raw(self,filename): \"\"\"Reads the detector and saves the", "return filesize def bkg_image_size(self): # does not work with protocol v1 (timeout) \"\"\"Width", "in range(1,4): dirnames = [] for pathname in filenames: for i in range(0,level):", "integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\") if reply == \"\": return 0 try: status =", "self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # The \"start_series_triggered\" command", "2*image_size**2 filesize = headersize+image_nbytes return filesize def bkg_image_size(self): # does not work with", "headers bin_facor: 2,4,8,16\"\"\" image_size = 7680/bin_factor # MS340HS headersize = 4096 image_nbytes =", "real filenames. 
When the rayonix softawre tries to save # an image the", "it, if necessary.\"\"\" if filename is None or filename == \"\": return from", "5: 40, 6: 60, 8: 75, 10: 120} bin_factor = self.bin_factor if bin_factor", "0-3: state: 0=idle,8=busy bits 4-7: acquire bits 8-11: read bits 12-15: correct bits", "files for other applications from DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire", "sleep,time from thread import allocate_lock class Rayonix_Detector(object): \"\"\"This is to remote control the", "= \"Opto\" def trigger(self): \"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\") while \"busy\" in self.state(): sleep(0.05)", "binning changed). wait: The is a 0.2 s delay until te detectror enters", "reply get_state - reply is integer number containing 6 4-bit fields bits 0-3:", "whether the chip is currently being read out\" # bit 8 and 9", "images timed by an exteranal hardware trigger signal filename_base: Directory part must be", "pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self = rayonix_detector # for debugging filenames =", "True # bit mask 0x00444440 masks out error flags if (status & ~0x0444444F)", "file on a network file server from the local format to the format", "a file. The image file is written in background as a pipelined operation.", "'test000001.rx'\"\"\" # Make sure the directory to write the image to exists. from", "do no wait for this to happen. 
\"\"\" ##t0 = time() # Wait", "acquiation # 0: the rising edge of the trigger initiates frame transfer/readout #", "return ((self.state_code() & 0x00003000) != 0) def state(self): \"\"\"Status information as string: idle,integating,reading,writing\"\"\"", "not self.ignore_first_trigger: self.trigger() # Wait for the first (suppressed) image readout to complete.", "import with_statement \"\"\" Remote control of the MAR CCD detector, using <NAME>'s sample", "import dirname,relpath,islink,exists from os import symlink,remove from shutil import rmtree directory = common_topdir(filenames)", "\"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\") while \"busy\" in self.state(): sleep(0.05) def get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS", "been changed. \"\"\" if not self.bkg_valid(): self.read_bkg() def bkg_valid(self): \"\"\"Does detector software have", "& 0x00000100) != 0: t+= [\"read queued\"] if (status & 0x00000200) != 0:", "(\"Server Arguments\" or \"Personal Name\") set to \"2222\". Or, alternatively from the command", "t+= [\"correct queued\"] if (status & 0x00002000) != 0: t+= [\"correcting\"] if (status", "not have the the same number of pixels as the last read image", "value is 0. \"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0 def update_bkg(self): \"\"\"Updates", "edge starts acquisition, # falling edge initiates frame transfer/readout self.bulb_mode = 0 #", "the rising edge of the trigger initiates frame transfer/readout # 1: rising edge", "the chip is currently being read out\" # bit 8 and 9 of", "\"\"\" if filename != None: self.make_directory(filename) if not self.save_raw: if filename != None:", "mode, which is substracted from every image after readout before the correction is", "sure that the directory is world-writable\"\"\" # This is a workaround for promblem", "via NFS on the # control machine might not be writable on the", "to create an image with # the specified name. 
from os.path import dirname,relpath,islink,exists", "is delayed until the last readout is finished. This also acquires a background", "t+= [\"write queued\"] if (status & 0x00020000) != 0: t+= [\"writing\"] if (status", "not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location of image files for other applications", "8=busy. writefile,<filename>,1 - Save the last read image, no reply set_bin,8,8 - Use", "image, no reply get_state - reply is integer number containing 6 4-bit fields", "\"queued\" or \"executing\" return ((self.state_code() & 0x00003000) != 0) def state(self): \"\"\"Status information", "This value is important to know if the bin factor is changed. If", "status of \"read\" # is either \"queued\" or \"executing\" return ((self.state_code() & 0x00000300)", "start(self,wait=True): \"\"\"Puts the detector into integration mode by stopping the continuous clearing. In", "a reply. Return the reply\"\"\" self.log(\"query %r\" % command) from tcp_client import query", "len(parts) >= 4: server = parts[2] ; share = parts[3] path = \"\"", "self.bulb_mode == 0 and not self.ignore_first_trigger: # The detector software does not save", "the detector, corrects the image and saves it to a file no reply", "# Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". pathname = pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname) except: pass", "command\" or \"Device Database Server\") set to \"/home/marccdsource/servers/marccd_server_socket\", and the third parameter (\"Server", "specified name. from os.path import dirname,relpath,islink,exists from os import symlink,remove from shutil import", "the first (suppressed) image readout to complete. sleep(self.readout_time) self.trigger_signal_type = \"Opto\" def trigger(self):", "data is saved rather than the correct image. 
\"\"\" if filename != None:", "read image the correction as saving to file will fail. At startup, the", "def set_bin_factor(self,n): if self.bin_factor == n: return if not self.state() == \"idle\": self.abort()", "except: return \"\" # bit mask 0x00444440 masks out error flags if (status", "rmtree(tempdir) except: pass makedirs(tempdir) for i in range(0,len(filenames)): link = tempdir+\"/%06d.rx\" % (i+1)", "(status & 0x00040000) != 0: t+= [\"write error\"] if (status & 0x00100000) !=", "control the MAR CCD detector Using remote protocol version 1\"\"\" name = \"rayonix_detector\"", "to happen. \"\"\" ##t0 = time() # Wait for the readout of the", "background while not self.is_idle(): sleep(0.05) self.last_read = time() def image_size(self): \"\"\"Width and height", "second as function of bin factor: readout_rate = {1: 2, 2: 10, 3:", "2=executing, 4=error The exception is the 'state' field, which has only 0=idle and", "False def is_integrating (self): \"tells whether the chip is integrating mode (not reading,", "else: read_time = nan return read_time*safetyFactor def make_directory(self,filename): \"\"\"Make sure that the directory", "after readout before the correction is applied. 
\"\"\" if not self.is_idle(): self.abort() while", "= time() while self.get_bin_factor() != n and time()-t < 3: sleep (0.1) bin_factor", "persistent_property import persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None): \"\"\"name: used", "reply set_bin,8,8 - Use 512x512-pixel bin mode, no reply set_bin,2,2 - Use full", "%r to %r: %s\" % (pathname,link,msg)) if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save", "import datetime timestamp = str(datetime.now()) return timestamp[:-3] # omit microsconds def remote(pathname): \"\"\"This", "self.abort() while self.state() != \"idle\": sleep(0.05) # The \"start_series_triggered\" command does not allow", "of symbilic link complying to the # naming scheme imposed by the 'start_series_triggered'", "CCD to integration mode, no reply readout,0,filename - Reads out the detector, corrects", "0) def is_correcting (self): \"tells whether the chip is currently being read out\"", "try: return int(self.query(\"get_size\").split(\",\")[0]) except: return 0 def filesize(self,bin_factor): \"\"\"Image file size in bytes", "filesize def bkg_image_size(self): # does not work with protocol v1 (timeout) \"\"\"Width and", "in range(0,level): pathname = dirname(pathname) dirnames += [pathname] if all([n == dirnames[0] for", "\"acquiring series\": sleep(0.05) if self.bulb_mode == 0 and not self.ignore_first_trigger: self.trigger() # Wait", "a valid background image. Otherwise, the image # correction will fail. if self.auto_bkg:", "filename is given, the image is saved as a file. 
The image file", "##while not self.is_reading(): sleep(0.05) self.last_read = time() def readout_and_save_raw(self,filename): \"\"\"Reads the detector and", "of the image in pixels at the current bin mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0])", "= remote(filename_base) # If already in sequence aquistion mode, cancel it. if not", "locally. \"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read = time() def readout_raw(self): \"Reads the detector out", "((self.state_code() & 0x00000300) != 0) def is_correcting (self): \"tells whether the chip is", "500(hsuser) # on \"con-ics-xpp-rayonix\" from os import makedirs,umask,chmod from os.path import exists from", "case there is no valid background image (after startup or binning changed). wait:", "t+= [\"integrate queued\"] if (status & 0x00000020) != 0: t+= [\"integrating\"] if (status", "to %r: %s\" % (filenames[i],tempdir,msg)) pathname = filenames[i] try: symlink(pathname,link) except Exception,msg: error(\"Cannot", "is applied.\"\"\" return self.bkg_image_size() == self.image_size() # By default verbose logging is enabled.", "to save corrected or raw images. self.save_raw = False # For triggred image", "\"\" # bit mask 0x00444440 masks out error flags if (status & ~0x0444444F)", "for promblem caused by the Rayonix software running # under a different user", "because the point-spread function of the fiber optic taper is large compared to", "for the clearing to stop?) 
When wait=False, do no wait for this to", "s delay until te detectror enters \"integrating\" state, (maybe for the clearing to", "= filenames[i] try: symlink(pathname,link) except Exception,msg: error(\"Cannot create of %r to %r: %s\"", "message += \"\\n\" t = timestamp() file(self.logfile,\"a\").write(\"%s: %s\" % (t,message)) def get_error_logfile(self): \"\"\"File", "= self.query(\"get_state\").strip(\"\\n\\0\") if reply == \"\": return 0 try: status = int(eval(reply)) except", "return True # bit mask 0x00444440 masks out error flags if (status &", "filename != None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while not self.is_reading(): sleep(0.05) self.last_read = time()", "4: 25, 5: 40, 6: 60, 8: 75, 10: 120} bin_factor = self.bin_factor", "server, not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves the last read image", "\"tells whether the chip is currently being read out\" # bit 8 and", "def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value) while \"busy\" in self.state(): sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type)", "uniformity correction to a file. The pathname of the file is interpreted in", "bytes including headers bin_facor: 2,4,8,16\"\"\" image_size = 7680/bin_factor # MS340HS headersize = 4096", "\"\"\"For error messages. Display the message and append it to the error log", "return read_time*safetyFactor def make_directory(self,filename): \"\"\"Make sure that the directory of teh given filename", "self.write(\"start\") if not wait: return while not self.is_integrating() and self.connected: sleep (0.05) def", "[] for pathname in filenames: for i in range(0,level): pathname = dirname(pathname) dirnames", "file system of the server, not locally. 
If 'save_raw' is true (default: false),", "status of \"correct\" # is either \"queued\" or \"executing\" return ((self.state_code() & 0x00003000)", "(0.1) bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout X and Y bin factor\") def read_bkg(self): \"\"\"Reads", "save to first image, which is a bad # image, when using triggered", "to \"J:\\anfinrud_0811\\Data\". pathname = pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname) except: pass # Convert separators", "frame transfer/readout self.bulb_mode = 0 # Keep track of when the detector was", "(status & 0x0000000F) == 7: t+= [\"error\"] if (status & 0x0000000F) == 8:", "filename_base,filename_suffix,number_field_width # 0 = not triggered, 1= triggered frame transfer, 2 = bulb", "stores the result as background while not self.is_idle(): sleep(0.05) self.last_read = time() def", "on the Rayonix control computer, compared # to the beamline control computer, so", "does not have the the same number of pixels as the last read", "ASCCI text, precise to 1 ms\"\"\" from datetime import datetime timestamp = str(datetime.now())", "if not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # The", "have a the backgound image for the current bin mode, which is substracted", "except Exception,msg: error(\"Relative path of %r with respect to %r: %s\" % (filenames[i],tempdir,msg))", "(i+1) if islink(link) or exists(link): remove(link) try: pathname = relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative", "%r\" % command) from tcp_client import query return query(self.ip_address,command) def state_code(self): \"\"\"Status information", "image. Otherwise, the image # correction will fail. 
if self.auto_bkg: self.update_bkg() self.write(\"start\") if", "1= triggered frame transfer, 2 = bulb mode, 3 = LCLS mode frame_trigger_type", "initiates frame transfer/readout # 1: rising edge starts acquisition, # falling edge initiates", "self.bin_factor if bin_factor in readout_rate: read_time = 1.0/readout_rate[bin_factor] else: read_time = nan return", "& 0x0000000F) == 7: t+= [\"error\"] if (status & 0x0000000F) == 8: t+=", "is a valid background image. Otherwise, the image # correction will fail. if", "% value) while \"busy\" in self.state(): sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try:", "in bytes including headers bin_facor: 2,4,8,16\"\"\" image_size = 7680/bin_factor # MS340HS headersize =", "{1: 2, 2: 10, 3: 15, 4: 25, 5: 40, 6: 60, 8:", "CCD compter. e.g. \"//id14bxf/data\" in Windows maps to \"/net/id14bxf/data\" on Unix\"\"\" if not", "forever. <NAME> 27 Mar 2014 ##if time()-t0 > 2.0: self.abort() # Make sure", "(status & 0x00000200) != 0: t+= [\"reading\"] if (status & 0x00000400) != 0:", "bin mode, no reply set_bin,2,2 - Use full readout mode (2048x2048 pixels), no", "that does not generate a reply\"\"\" from tcp_client import write write(self.ip_address,command) def query(self,command):", "Control control panel with the second parameter (\"Server command\" or \"Device Database Server\")", "!= 0: t+= [\"dezinger queued\"] if (status & 0x00200000) != 0: t+= [\"dezingering\"]", "work with protocol v1 (timeout) \"\"\"Width and height of the current background image", "= path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end): pathname += end return pathname", "try: pathname = relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative path of %r with respect to", "(status & 0x02000000) != 0: t+= [\"acquiring series\"] if (status & 0x04000000) !=", "E.g. 
user id 10660(xppopr) on \"xpp-daq\", versus user id 500(hsuser) # on \"con-ics-xpp-rayonix\"", "image and saves it to a file no reply readout,1 - reads a", "not wait: return while not self.is_integrating() and self.connected: sleep (0.05) def abort(self): \"\"\"Cancel", "optic taper is large compared to the pixel size) get_bin - reply is", "if (status & 0x00000200) != 0: t+= [\"reading\"] if (status & 0x00000400) !=", "compter. e.g. \"//id14bxf/data\" in Windows maps to \"/net/id14bxf/data\" on Unix\"\"\" if not pathname:", "scheme imposed by the 'start_series_triggered' command that # point ot the real filenames.", "def is_correcting (self): \"tells whether the chip is currently being read out\" #", "meaning: 0=idle, 1=queued, 2=executing, 4=error The exception is the 'state' field, which has", "(status & 0x04000000) != 0: t+= [\"series error\"] state = \",\".join(t) return state", "(status & 0x00000040) != 0: t+= [\"integrate error\"] if (status & 0x00000100) !=", "track of when the detector was last read. self.last_read = 0.0 # Verbose", "CCD detector Using remote protocol version 1\"\"\" name = \"rayonix_detector\" from persistent_property import", "out\" # bit 8 and 9 of the state code tell whether the", "mode (2048x2048 pixels), no reply (The 1x1 bin mode with 4096x4096 pixels is", "a background image, in case there is no valid background image (after startup", "to stop?) When wait=False, do no wait for this to happen. 
\"\"\" ##t0", "for other applications from DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a", "to \"/home/marccdsource/servers/marccd_server_socket\", and the third parameter (\"Server Arguments\" or \"Personal Name\") set to", "\"busy\" in self.state(): sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0]) except:", "== \"\": return 0 try: status = int(eval(reply)) except Exception,message: self.log_error(\"command 'get_state' generated", "of the state code tell whether the task status of \"correct\" # is", "file system of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves", "enabled.\"\"\" if not self.verbose_logging: return if len(message) == 0 or message[-1] != \"\\n\":", "& 0x00200000) != 0: t+= [\"dezingering\"] if (status & 0x00400000) != 0: t+=", "def makedirs(pathname): \"\"\"Create a directory, or make sure that the directory is world-writable\"\"\"", "break pathname = filenames[0] for i in range(0,level): pathname = dirname(pathname) return pathname", "t+= [\"series error\"] state = \",\".join(t) return state def start(self,wait=True): \"\"\"Puts the detector", "None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while not self.is_reading(): sleep(0.05) self.last_read = time() def readout_and_save_raw(self,filename):", "acquire_images_triggered(self,filenames): \"\"\"Acquire a series of images timed by an external hardware trigger signal.", "\"\"\"Does detector software have a the backgound image for the current bin mode,", "number containing 6 4-bit fields bits 0-3: state: 0=idle,8=busy bits 4-7: acquire bits", "background as a pipelined operation. 
The function returns immediately. The pathname of the", "given filename exists by create it, if necessary.\"\"\" if filename is None or", "bin factor has been changed. \"\"\" if not self.bkg_valid(): self.read_bkg() def bkg_valid(self): \"\"\"Does", "queued\"] if (status & 0x00200000) != 0: t+= [\"dezingering\"] if (status & 0x00400000)", "will fail. At startup, the background image is empty and this value is", "time() def readout_raw(self): \"Reads the detector out without correcting and displaying the image.\"", "program \"marccd_server_socket\" with TCP port number 2222. Usage example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The", "= tempdir+\"/%06d.rx\" % (i+1) if islink(link) or exists(link): remove(link) try: pathname = relpath(filenames[i],tempdir)", "def filesize(self,bin_factor): \"\"\"Image file size in bytes including headers bin_facor: 2,4,8,16\"\"\" image_size =", "user id 10660(xppopr) on \"xpp-daq\", versus user id 500(hsuser) # on \"con-ics-xpp-rayonix\" from", "\"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width # 0 = not triggered, 1=", "access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames: list of strings\"\"\" from os.path import dirname if len(filenames)", "[] if (status & 0x0000000F) == 6: t+= [\"unavailable\"] if (status & 0x0000000F)", "for 'test000001.rx'\"\"\" # Make sure the directory to write the image to exists.", "import win32wnet # Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". pathname = pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname)", "drive letter to a UNC name. 
try: import win32wnet # Convert \"J:/anfinrud_0811/Data\" to", "self.write(\"trigger,0.001\") while \"busy\" in self.state(): sleep(0.05) def get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup', 'CMOS", "= pathname.split(\"/\") if len(parts) >= 4: server = parts[2] ; share = parts[3]", "state code tell whether the task status of \"read\" # is either \"queued\"", "filename_base = remote(filename_base) # If already in sequence aquistion mode, cancel it. if", "= 0.0 # Verbose logging: record every command and reply in /tmp/rayonix_detector.log self.verbose_logging", "makedirs(pathname): \"\"\"Create a directory, or make sure that the directory is world-writable\"\"\" #", "the reply\"\"\" self.log(\"query %r\" % command) from tcp_client import query return query(self.ip_address,command) def", "a file no reply readout,1 - reads a new background image, no reply", "directory to write the image to exists. from os.path import dirname directory =", "and stores the result as background while not self.is_idle(): sleep(0.05) self.last_read = time()", "in pixels at the current bin mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0]) except: return 0", "and this value is 0. 
\"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0 def", "with 'bin_factor'.\"\"\" safetyFactor = 1 from numpy import nan # Readout rate in", "address, in case there is more than one detector\"\"\" if name is not", "strings\"\"\" from os.path import dirname if len(filenames) == 0: return [] if len(filenames)", "pixels), no reply (The 1x1 bin mode with 4096x4096 pixels is not used,", "write(self,command): \"\"\"Sends a comman that does not generate a reply\"\"\" from tcp_client import", "8 and 9 of the state code tell whether the task status of", "time() return status def is_idle (self): try: status = self.state_code() except: return True", "(filenames[i],tempdir,msg)) pathname = filenames[i] try: symlink(pathname,link) except Exception,msg: error(\"Cannot create of %r to", "number, e.g. 6 for 'test000001.rx'\"\"\" # Make sure the directory to write the", "id on the Rayonix control computer, compared # to the beamline control computer,", "# for debugging filenames = [\"/tmp/test_%03d.mccd\" % (i+1) for i in range(0,10)] print('rayonix_detector.ip_address", "Mar 2014 ##if time()-t0 > 2.0: self.abort() # Make sure there is a", "Convert separators from DOS style to UNIX style. pathname = pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\")", "!= 0) def is_reading (self): \"tells whether the chip is currently being read", "0: t+= [\"dezingering\"] if (status & 0x00400000) != 0: t+= [\"dezinger error\"] if", "(status & 0x00000100) != 0: t+= [\"read queued\"] if (status & 0x00000200) !=", "te detectror enters \"integrating\" state, (maybe for the clearing to stop?) When wait=False,", "% (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message)) self.log(message) def log(self,message): \"\"\"For non-critical messages. 
Append", "= \"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end): pathname += end return pathname def makedirs(pathname): \"\"\"Create", "\"Personal Name\") set to \"2222\". Or, alternatively from the command line by the", "reads a new background image, no reply get_state - reply is integer number", "self.query(\"get_state\").strip(\"\\n\\0\") if reply == \"\": return 0 try: status = int(eval(reply)) except Exception,message:", "id 10660(xppopr) on \"xpp-daq\", versus user id 500(hsuser) # on \"con-ics-xpp-rayonix\" from os", "win32wnet.WNetGetUniversalName(pathname) except: pass # Convert separators from DOS style to UNIX style. pathname", "pathname = pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname) except: pass # Convert separators from DOS", "of the server, not locally. If 'save_raw' is true (default: false), the image", "if filename != None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while not self.is_reading(): sleep(0.05) self.last_read =", "remove(link) try: pathname = relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative path of %r with respect", "# 0: the rising edge of the trigger initiates frame transfer/readout # 1:", "is finished. This also acquires a background image, in case there is no", "and uniformity correction to a file. 
The pathname of the file is interpreted", "tempdir = directory+\"/.rayonix_temp\" try: rmtree(tempdir) except: pass makedirs(tempdir) for i in range(0,len(filenames)): link", "remote protocol version 1\"\"\" name = \"rayonix_detector\" from persistent_property import persistent_property ip_address =", "!= n and time()-t < 3: sleep (0.1) bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout X", "by create it, if necessary.\"\"\" if filename is None or filename == \"\":", "timestamp(): \"\"\"Current date and time as formatted ASCCI text, precise to 1 ms\"\"\"", "task status of \"correct\" # is either \"queued\" or \"executing\" return ((self.state_code() &", "has only 0=idle and 8=busy. writefile,<filename>,1 - Save the last read image, no", "as string: idle,integating,reading,writing\"\"\" try: status = self.state_code() except: return \"\" # bit mask", "(status & 0x00000010) != 0: t+= [\"integrate queued\"] if (status & 0x00000020) !=", "image without spatial and uniformity correction to a file. The pathname of the", "\"integrating\" state, (maybe for the clearing to stop?) When wait=False, do no wait", "is not valid backgournd image. self.auto_bkg = True # Whether to save corrected", "def image_size(self): \"\"\"Width and height of the image in pixels at the current", "CCD detector, using <NAME>'s sample remote control server program \"marccd_server_socket\" with TCP port", "\"\": return makedirs(directory) def log_error(self,message): \"\"\"For error messages. Display the message and append", "0: return [] if len(filenames) == 1: return dirname(filenames[0]) for level in range(1,4):", "t = timestamp() file(self.logfile,\"a\").write(\"%s: %s\" % (t,message)) def get_error_logfile(self): \"\"\"File name error messages.\"\"\"", "\"executing\" if not self.connected: return True return ((self.state_code() & 0x00000020) != 0) def", "# point ot the real filenames. 
When the rayonix softawre tries to save", "bin mode, which is substracted from every image after readout before the correction", "from sys import stderr if exists(pathname) and not iswritable(pathname): try: chmod(pathname,0777) except Exception,details:", "the state code tell whether the task status of \"correct\" # is either", "naming scheme imposed by the 'start_series_triggered' command that # point ot the real", "- Save the last read image, no reply set_bin,8,8 - Use 512x512-pixel bin", "is a 0.2 s delay until te detectror enters \"integrating\" state, (maybe for", "0x00200000) != 0: t+= [\"dezingering\"] if (status & 0x00400000) != 0: t+= [\"dezinger", "\"\"\"Updates the backgound image if needed, for instance after the server has been", "using triggered frame transfer mode. However, (as of # Jul 2014, version 0.3.10),", "get_logfile(self): \"\"\"File name for transcript if verbose logging is enabled.\"\"\" from tempfile import", "the readout of the previous image to finish. while self.is_reading(): sleep(0.05) # Work-around", "image the correction as saving to file will fail. At startup, the background", "doc=\"Readout X and Y bin factor\") def read_bkg(self): \"\"\"Reads a fresh the backgound", "query method multi-thread safe. self.lock = allocate_lock() # If this flag is set", "trigger pulses # to aquire 10 images. # Workaround: Software-trigger the detector once", "mode, no reply set_bin,2,2 - Use full readout mode (2048x2048 pixels), no reply", "current bin mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0]) except: return 0 def filesize(self,bin_factor): \"\"\"Image file", "transfer/readout # 1: rising edge starts acquisition, # falling edge initiates frame transfer/readout", "Or, alternatively from the command line by the commaand \"hsserver_lagacy\". The server understand", "id 500(hsuser) # on \"con-ics-xpp-rayonix\" from os import makedirs,umask,chmod from os.path import exists", "rather than the correct image. 
\"\"\" if filename != None: self.make_directory(filename) if not", "backgournd image. self.auto_bkg = True # Whether to save corrected or raw images.", "self.timeout = 1.0 # This is to make the query method multi-thread safe.", "# This is a workaround for promblem caused by the Rayonix software running", "the file is interpreted in file system of the server, not locally. If", "from thread import allocate_lock class Rayonix_Detector(object): \"\"\"This is to remote control the MAR", "pathname = dirname(pathname) return pathname rayonix_detector = Rayonix_Detector() if __name__ == \"__main__\": #", "the following commands: start - Puts the CCD to integration mode, no reply", "readout_and_save_raw(self,filename): \"\"\"Reads the detector and saves the uncorrected image as a file. The", "is the number of pixels of current the background image, e.g. \"2048,2048\" Reference:", "connected(self): from tcp_client import connected return connected(self.ip_address) online = connected def write(self,command): \"\"\"Sends", "delayed until the last readout is finished. This also acquires a background image,", ">= 4: server = parts[2] ; share = parts[3] path = \"\" for", "every command and reply in /tmp/rayonix_detector.log self.verbose_logging = True @property def connected(self): from", "def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series of images timed by an exteranal hardware", "try: rmtree(tempdir) except: pass makedirs(tempdir) for i in range(0,len(filenames)): link = tempdir+\"/%06d.rx\" %", "readout is finished. This also acquires a background image, in case there is", "dirname(pathname) dirnames += [pathname] if all([n == dirnames[0] for n in dirnames]): break", "0=idle,8=busy bits 4-7: acquire bits 8-11: read bits 12-15: correct bits 16-19: write", "a bad # image, when using triggered frame transfer mode. However, (as of", "the last read image without spatial and uniformity correction to a file. 
The", "4-bit code, with the following meaning: 0=idle, 1=queued, 2=executing, 4=error The exception is", "time() # Wait for the readout of the previous image to finish. while", "try: symlink(pathname,link) except Exception,msg: error(\"Cannot create of %r to %r: %s\" % (pathname,link,msg))", "return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0 def update_bkg(self): \"\"\"Updates the backgound image if needed,", "sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a bin factor change it takes about 2 s", "% (pathname,details)) def iswritable(pathname): \"\"\"Is file or folder writable?\"\"\" from os import access,W_OK", "persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None): \"\"\"name: used for IP", "def read_bkg(self): \"\"\"Reads a fresh the backgound image, which is substracted from every", "dirname directory = dirname(filename_base) makedirs(directory) filename_base = remote(filename_base) # If already in sequence", "the correction as saving to file will fail. At startup, the background image", "property(get_error_logfile) def get_logfile(self): \"\"\"File name for transcript if verbose logging is enabled.\"\"\" from", "0x00000100) != 0: t+= [\"read queued\"] if (status & 0x00000200) != 0: t+=", "state. 
if not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) #", "[\"integrate error\"] if (status & 0x00000100) != 0: t+= [\"read queued\"] if (status", "starts acquisition, # falling edge initiates frame transfer/readout self.bulb_mode = 0 # Keep", "detector software does not save to first image, which is a bad #", "gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile) def get_logfile(self): \"\"\"File name for transcript if verbose logging", "filed contains a 4-bit code, with the following meaning: 0=idle, 1=queued, 2=executing, 4=error", "happen. \"\"\" ##t0 = time() # Wait for the readout of the previous", "numbers, e.g. \"2,2\" get_size_bkg - reply is the number of pixels of current", "connected(self.ip_address) online = connected def write(self,command): \"\"\"Sends a comman that does not generate", "Change when problem solved. logging = False @property def readout_time(self): \"\"\"Estimated readout time", "in \"reading\" state # forever. <NAME> 27 Mar 2014 ##if time()-t0 > 2.0:", "the bin factor has been changed. \"\"\" if not self.bkg_valid(): self.read_bkg() def bkg_valid(self):", "the task status of \"correct\" # is either \"queued\" or \"executing\" return ((self.state_code()", "if filename is None or filename == \"\": return from os.path import dirname", "int(eval(reply)) except Exception,message: self.log_error(\"command 'get_state' generated bad reply %r: %s\" % (reply,message)) return", "query(self,command): \"\"\"Send a command that generates a reply. 
Return the reply\"\"\" self.log(\"query %r\"", "common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\" try: rmtree(tempdir) except: pass makedirs(tempdir) for i in range(0,len(filenames)):", "backgound image, which is substracted from every image after readout before the correction", "not pathname: return pathname end = \"/\" if pathname.endswith(\"/\") else \"\" # Try", "[\"series error\"] state = \",\".join(t) return state def start(self,wait=True): \"\"\"Puts the detector into", "self.bulb_mode == 0 and not self.ignore_first_trigger: self.trigger() # Wait for the first (suppressed)", "(t,message)) self.log(message) def log(self,message): \"\"\"For non-critical messages. Append the message to the transcript,", "if __name__ == \"__main__\": # for testing from pdb import pm import logging", "error flags if (status & ~0x0444444F) == 0: return \"idle\" t = []", "DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series of images timed", "filesize = headersize+image_nbytes return filesize def bkg_image_size(self): # does not work with protocol", "filesize(self,bin_factor): \"\"\"Image file size in bytes including headers bin_facor: 2,4,8,16\"\"\" image_size = 7680/bin_factor", "from the Remote Control control panel with the second parameter (\"Server command\" or", "TCP port number 2222. Usage example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is started", "(after startup or binning changed). wait: The is a 0.2 s delay until", "Pulldown','CMOS Pullup', 'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" %", "read image to a file. The pathname of the file is interpreted in", "Software-trigger the detector once after starting a series. 
self.trigger_signal_type = \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0,", "exception is the 'state' field, which has only 0=idle and 8=busy. writefile,<filename>,1 -", "if (status & 0x0000000F) == 6: t+= [\"unavailable\"] if (status & 0x0000000F) ==", "IP address, in case there is more than one detector\"\"\" if name is", "or binning changed). wait: The is a 0.2 s delay until te detectror", "from shutil import rmtree directory = common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\" try: rmtree(tempdir) except:", "pathname.endswith(\"/\") else \"\" # Try to expand a Windows drive letter to a", "rmtree directory = common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\" try: rmtree(tempdir) except: pass makedirs(tempdir) for", "log_error(self,message): \"\"\"For error messages. Display the message and append it to the error", "on file system of the Rayonix computer\"\"\" # The detector will ignore an", "backgound image if needed, for instance after the server has been restarted or", "!= 0) def state(self): \"\"\"Status information as string: idle,integating,reading,writing\"\"\" try: status = self.state_code()", "function of the fiber optic taper is large compared to the pixel size)", "When the rayonix softawre tries to save # an image the symblix link", "= 2*image_size**2 filesize = headersize+image_nbytes return filesize def bkg_image_size(self): # does not work", "to UNIX style. 
pathname = pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") == 0: # //server/share/directory/file parts", "either \"queued\" or \"executing\" return ((self.state_code() & 0x00003000) != 0) def state(self): \"\"\"Status", "for i in range(0,level): pathname = dirname(pathname) dirnames += [pathname] if all([n ==", "- reply is the number of pixels of current the background image, e.g.", "- Puts the CCD to integration mode, no reply readout,0,filename - Reads out", "self.trigger() # Wait for the first (suppressed) image readout to complete. sleep(self.readout_time) self.trigger_signal_type", "Jul 2014, version 0.3.10), the detector still requires 11 trigger pulses # to", "parameter (\"Server command\" or \"Device Database Server\") set to \"/home/marccdsource/servers/marccd_server_socket\", and the third", "last read image to a file. The pathname of the file is interpreted", "cancel it. if not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05)", "and not self.ignore_first_trigger: self.trigger() # Wait for the first (suppressed) image readout to", "including the dot (.) number_field_width: number of digits for the filename sequence number,", "Rayonix control computer, compared # to the beamline control computer, so directories created", "except Exception,message: self.log_error(\"command 'get_state' generated bad reply %r: %s\" % (reply,message)) return 0", "server is started from the MarCCD software from the Remote Control control panel", "[\"write queued\"] if (status & 0x00020000) != 0: t+= [\"writing\"] if (status &", "The pathname of the file is interpreted in file system of the server,", "detector remaingns in \"reading\" state # forever. <NAME> 27 Mar 2014 ##if time()-t0", "Name\") set to \"2222\". 
Or, alternatively from the command line by the commaand", "parts = pathname.split(\"/\") if len(parts) >= 4: server = parts[2] ; share =", "correct bits 16-19: write bits 20-23: dezinger Each filed contains a 4-bit code,", "there is a valid background image. Otherwise, the image # correction will fail.", "0x00010000) != 0: t+= [\"write queued\"] if (status & 0x00020000) != 0: t+=", "\"\"\"Image file size in bytes including headers bin_facor: 2,4,8,16\"\"\" image_size = 7680/bin_factor #", "is 0. \"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0 def update_bkg(self): \"\"\"Updates the", "trigger(self): \"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\") while \"busy\" in self.state(): sleep(0.05) def get_trigger_signal_type(self): \"\"\"'Opto','Opto", "# The detector software does not save to first image, which is a", "MAR CCD detector Using remote protocol version 1\"\"\" name = \"rayonix_detector\" from persistent_property", "using <NAME>'s sample remote control server program \"marccd_server_socket\" with TCP port number 2222.", "def state_code(self): \"\"\"Status information as integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\") if reply == \"\":", "3 = LCLS mode frame_trigger_type = 2 if self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" %", "= False @property def readout_time(self): \"\"\"Estimated readout time in seconds. Changes with 'bin_factor'.\"\"\"", "commaand \"hsserver_lagacy\". 
The server understand the following commands: start - Puts the CCD", "\"tells whether the chip is integrating mode (not reading, not clearing)\" # \"acquire\"", "\"idle\": sleep(0.05) # The \"start_series_triggered\" command does not allow a list of filenames", "not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # Need a", "if self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() != \"acquiring series\": sleep(0.05)", "read. self.last_read = 0.0 # Verbose logging: record every command and reply in", "\"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0 def update_bkg(self): \"\"\"Updates the backgound image", "\"\"\"Width and height of the image in pixels at the current bin mode\"\"\"", "of the Rayonix computer\"\"\" # The detector will ignore an \"acquire_images_triggered\" command if", "pathname = win32wnet.WNetGetUniversalName(pathname) except: pass # Convert separators from DOS style to UNIX", "# If already in sequence aquistion mode, cancel it. if not self.state() ==", "set 'start' automatically reads a background image # if there is not valid", "(status & 0x00000020) != 0: t+= [\"integrating\"] if (status & 0x00000040) != 0:", "trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0]) except: return def set_bin_factor(self,n): if", "def is_idle (self): try: status = self.state_code() except: return True # bit mask", "0x0000000F) == 8: t+= [\"busy\"] if (status & 0x00000010) != 0: t+= [\"integrate", "folder writable?\"\"\" from os import access,W_OK return access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames: list of", "reply is the number of pixels of current the background image, e.g. 
\"2048,2048\"", "== \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # Need a valid background", "0x02000000) != 0: t+= [\"acquiring series\"] if (status & 0x04000000) != 0: t+=", "0: t+= [\"correcting\"] if (status & 0x00004000) != 0: t+= [\"correct error\"] if", "from os import symlink,remove from shutil import rmtree directory = common_topdir(filenames) tempdir =", "status def is_idle (self): try: status = self.state_code() except: return True # bit", "27 Mar 2014 ##if time()-t0 > 2.0: self.abort() # Make sure there is", "filenames # to be specified, but uses auto-generated filenames instead. # As a", "umask(0000) try: makedirs(pathname) except Exception,details: stderr.write(\"makedirs: %r: %r\" % (pathname,details)) def iswritable(pathname): \"\"\"Is", "def iswritable(pathname): \"\"\"Is file or folder writable?\"\"\" from os import access,W_OK return access(pathname,W_OK)", "the commaand \"hsserver_lagacy\". The server understand the following commands: start - Puts the", "image, which is a bad # image, when using triggered frame transfer mode.", "stderr.write(\"chmod: %r: %r\" % (pathname,details)) if not exists(pathname): umask(0000) try: makedirs(pathname) except Exception,details:", "0: t+= [\"write error\"] if (status & 0x00100000) != 0: t+= [\"dezinger queued\"]", "fresh the backgound image, which is substracted from every image after readout before", "(t,message)) def get_error_logfile(self): \"\"\"File name error messages.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector_error.log\"", "import persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None): \"\"\"name: used for", "file no reply readout,1 - reads a new background image, no reply get_state", "self.is_integrating() and self.connected: sleep (0.05) def abort(self): \"\"\"Cancel series acquiation 
mode\"\"\" self.write(\"abort\") def", "\"\"\" if not self.is_idle(): self.abort() while not self.is_idle(): sleep(0.05) self.write(\"readout,1\") # read the", "except: return def set_bin_factor(self,n): if self.bin_factor == n: return if not self.state() ==", "\"\"\"Create a directory, or make sure that the directory is world-writable\"\"\" # This", "self.last_read = 0.0 # Verbose logging: record every command and reply in /tmp/rayonix_detector.log", "of the Rayonix computer filename_suffix: including the dot (.) number_field_width: number of digits", "# //server/share/directory/file parts = pathname.split(\"/\") if len(parts) >= 4: server = parts[2] ;", "error\"] if (status & 0x00001000) != 0: t+= [\"correct queued\"] if (status &", "compared # to the beamline control computer, so directories created via NFS on", "datetime import datetime timestamp = str(datetime.now()) return timestamp[:-3] # omit microsconds def remote(pathname):", "None: self.make_directory(filename) if not self.save_raw: if filename != None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else:", "correction will fail. if self.auto_bkg: self.update_bkg() self.write(\"start\") if not wait: return while not", "win32wnet # Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". pathname = pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname) except:", "the CCD to integration mode, no reply readout,0,filename - Reads out the detector,", "result as background while not self.is_idle(): sleep(0.05) self.last_read = time() def image_size(self): \"\"\"Width", "code tell whether the task status of \"read\" # is either \"queued\" or", "return makedirs(directory) def log_error(self,message): \"\"\"For error messages. Display the message and append it", "create an image with # the specified name. from os.path import dirname,relpath,islink,exists from", "startup, the background image is empty and this value is 0. 
\"\"\" try:", "self.state_code() except: return \"\" # bit mask 0x00444440 masks out error flags if", "while not self.is_integrating() and self.connected: sleep (0.05) def abort(self): \"\"\"Cancel series acquiation mode\"\"\"", "bits 4-7: acquire bits 8-11: read bits 12-15: correct bits 16-19: write bits", "if islink(link) or exists(link): remove(link) try: pathname = relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative path", "transfer mode. However, (as of # Jul 2014, version 0.3.10), the detector still", "%r: %s\" % (pathname,link,msg)) if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location of", "image before starting acquisition. if self.auto_bkg: self.update_bkg() if self.bulb_mode == 0 and not", "25, 5: 40, 6: 60, 8: 75, 10: 120} bin_factor = self.bin_factor if", "softawre tries to save # an image the symblix link redirects is to", "for i in range(0,len(filenames)): link = tempdir+\"/%06d.rx\" % (i+1) if islink(link) or exists(link):", "\"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a bin factor change it takes about 2", "(self): try: status = self.state_code() except: return True # bit mask 0x00444440 masks", "##if time()-t0 > 2.0: self.abort() # Make sure there is a valid background", "try: makedirs(pathname) except Exception,details: stderr.write(\"makedirs: %r: %r\" % (pathname,details)) def iswritable(pathname): \"\"\"Is file", "being read out\" # bit 8 and 9 of the state code tell", "0=idle and 8=busy. writefile,<filename>,1 - Save the last read image, no reply set_bin,8,8", "= timestamp() file(self.logfile,\"a\").write(\"%s: %s\" % (t,message)) def get_error_logfile(self): \"\"\"File name error messages.\"\"\" from", "= 1.0 # This is to make the query method multi-thread safe. 
self.lock", "update_bkg(self): \"\"\"Updates the backgound image if needed, for instance after the server has", "return def set_bin_factor(self,n): if self.bin_factor == n: return if not self.state() == \"idle\":", "Control Author: <NAME> Date created: 2013-09-20 Date last modified: 2018-06-101 \"\"\" __version__ =", "(reply,message)) return 0 # bit 8 and 9 of the state code tell", "save_raw_image(self,filename): \"\"\"Saves the last read image without spatial and uniformity correction to a", "background image (after startup or binning changed). wait: The is a 0.2 s", "0x00000020) != 0: t+= [\"integrating\"] if (status & 0x00000040) != 0: t+= [\"integrate", "\"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # The \"start_series_triggered\" command does not", "tcp_client import write write(self.ip_address,command) def query(self,command): \"\"\"Send a command that generates a reply.", "if not # in \"idle\" state. if not self.state() == \"idle\": self.abort() while", "os.path import dirname directory = dirname(filename) if directory == \"\": return makedirs(directory) def", "is world-writable\"\"\" # This is a workaround for promblem caused by the Rayonix", "integrating mode (not reading, not clearing)\" # \"acquire\" field is \"executing\" if not", "is enabled.\"\"\" if not self.verbose_logging: return if len(message) == 0 or message[-1] !=", "pixels at the current bin mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0]) except: return 0 def", "self.is_idle(): sleep(0.05) self.last_read = time() def image_size(self): \"\"\"Width and height of the image", "None or filename == \"\": return from os.path import dirname directory = dirname(filename)", "not pathname.endswith(end): pathname += end return pathname def makedirs(pathname): \"\"\"Create a directory, or", "relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative path of %r with respect to %r: %s\" %", "pathname on file system of the Rayonix computer\"\"\" # The 
detector will ignore", "restarted or after the bin factor has been changed. \"\"\" if not self.bkg_valid():", "t+= [\"read queued\"] if (status & 0x00000200) != 0: t+= [\"reading\"] if (status", "0x00444440 masks out error flags if (status & ~0x0444444F) == 0: return \"idle\"", "exists from sys import stderr if exists(pathname) and not iswritable(pathname): try: chmod(pathname,0777) except", "applications from DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series of", "the image raw data is saved rather than the correct image. \"\"\" if", "is a workaround for promblem caused by the Rayonix software running # under", "& 0x00003000) != 0) def state(self): \"\"\"Status information as string: idle,integating,reading,writing\"\"\" try: status", "takes about 2 s before the new # bin factor is read back.", "part must be valid pathname on file system of the Rayonix computer filename_suffix:", "chip is currently being read out\" # bit 8 and 9 of the", "- Use 512x512-pixel bin mode, no reply set_bin,2,2 - Use full readout mode", "[\"busy\"] if (status & 0x00000010) != 0: t+= [\"integrate queued\"] if (status &", "!= \"idle\": sleep(0.05) # The \"start_series_triggered\" command does not allow a list of", "Directory part must be valid pathname on file system of the Rayonix computer", "if not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # Need", "Server\") set to \"/home/marccdsource/servers/marccd_server_socket\", and the third parameter (\"Server Arguments\" or \"Personal Name\")", "bin factor is read back. t = time() while self.get_bin_factor() != n and", "in file system of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames):", "error log file. 
If verbose logging is enabled, it is also added to", "try: status = int(eval(reply)) except Exception,message: self.log_error(\"command 'get_state' generated bad reply %r: %s\"", "triggered frame transfer, 2 = bulb mode, 3 = LCLS mode frame_trigger_type =", "the detector was last read. self.last_read = 0.0 # Verbose logging: record every", "save corrected or raw images. self.save_raw = False # For triggred image acquiation", "stop?) When wait=False, do no wait for this to happen. \"\"\" ##t0 =", "NFS on the # control machine might not be writable on the Rayonix", "has been restarted or after the bin factor has been changed. \"\"\" if", "write write(self.ip_address,command) def query(self,command): \"\"\"Send a command that generates a reply. Return the", "self.state() != \"acquiring series\": sleep(0.05) if self.bulb_mode == 0 and not self.ignore_first_trigger: self.trigger()", "%s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message)) self.log(message) def log(self,message): \"\"\"For non-critical messages.", "import write write(self.ip_address,command) def query(self,command): \"\"\"Send a command that generates a reply. Return", "or \"executing\" return ((self.state_code() & 0x00003000) != 0) def state(self): \"\"\"Status information as", "%s\" % (reply,message)) return 0 # bit 8 and 9 of the state", "import nan # Readout rate in frames per second as function of bin", "This is a workaround for promblem caused by the Rayonix software running #", "is in progess, execution is delayed until the last readout is finished. 
This", "%r\" % (pathname,details)) def iswritable(pathname): \"\"\"Is file or folder writable?\"\"\" from os import", "Save location of image files for other applications from DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames))", "file or folder writable?\"\"\" from os import access,W_OK return access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames:", "if all([n == dirnames[0] for n in dirnames]): break pathname = filenames[0] for", "else: return False def is_integrating (self): \"tells whether the chip is integrating mode", "image the symblix link redirects is to create an image with # the", "an exteranal hardware trigger signal filename_base: Directory part must be valid pathname on", "for part in parts[4:]: path += part+\"/\" path = path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path", "the fiber optic taper is large compared to the pixel size) get_bin -", "self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() != \"acquiring series\": sleep(0.05) if", "= dirname(filename_base) makedirs(directory) filename_base = remote(filename_base) # If already in sequence aquistion mode,", "list of strings\"\"\" from os.path import dirname if len(filenames) == 0: return []", "# read the CCD and stores the result as background while not self.is_idle():", "after readout before the correction is applied.\"\"\" return self.bkg_image_size() == self.image_size() # By", "name \"rayonix_detector\" may be overridden in subclass from logging import debug,info,warn,error import socket", "time() def save_image(self,filename): \"\"\"Saves the last read image to a file. The pathname", "this value is 0. 
\"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0 def update_bkg(self):", "return \"idle\" t = [] if (status & 0x0000000F) == 6: t+= [\"unavailable\"]", "Each filed contains a 4-bit code, with the following meaning: 0=idle, 1=queued, 2=executing,", "\"\"\" __version__ = \"4.0.1\" # default name \"rayonix_detector\" may be overridden in subclass", "created: 2013-09-20 Date last modified: 2018-06-101 \"\"\" __version__ = \"4.0.1\" # default name", "\"correct\" # is either \"queued\" or \"executing\" return ((self.state_code() & 0x00003000) != 0)", "from every image after readout before the correction is applied.\"\"\" return self.bkg_image_size() ==", "str(datetime.now()) return timestamp[:-3] # omit microsconds def remote(pathname): \"\"\"This converts the pathname of", "image, in case there is no valid background image (after startup or binning", "image in pixels at the current bin mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0]) except: return", "digits for the filename sequence number, e.g. 6 for 'test000001.rx'\"\"\" # Make sure", "state(self): \"\"\"Status information as string: idle,integating,reading,writing\"\"\" try: status = self.state_code() except: return \"\"", "a work-araound generated a series of symbilic link complying to the # naming", "\"2222\". Or, alternatively from the command line by the commaand \"hsserver_lagacy\". The server", "an \"acquire_images_triggered\" command if not # in \"idle\" state. if not self.state() ==", "a valid background image before starting acquisition. 
if self.auto_bkg: self.update_bkg() if self.bulb_mode ==", "time() while self.get_bin_factor() != n and time()-t < 3: sleep (0.1) bin_factor =", "from pdb import pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self = rayonix_detector # for", "self.last_read = time() return status def is_idle (self): try: status = self.state_code() except:", "self.abort() while not self.is_idle(): sleep(0.05) self.write(\"readout,1\") # read the CCD and stores the", "valid pathname on file system of the Rayonix computer filename_suffix: including the dot", "Chapter 9: The Legacy Remote Mode for HS Detector Control Author: <NAME> Date", "name for transcript if verbose logging is enabled.\"\"\" from tempfile import gettempdir return", "is substracted from every image after readout before the correction is applied. \"\"\"", "mode with 4096x4096 pixels is not used, because the point-spread function of the", "import query return query(self.ip_address,command) def state_code(self): \"\"\"Status information as integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\")", "self.abort() while self.state() != \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a bin factor change", "!= None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while not self.is_reading(): sleep(0.05) self.last_read = time() def", "pathnames. Directory part must be valid pathname on file system of the Rayonix", "must be valid pathname on file system of the Rayonix computer filename_suffix: including", "valid background image. Otherwise, the image # correction will fail. if self.auto_bkg: self.update_bkg()", "pipelined operation. The function returns immediately. The pathname of the file is interpreted", "Verbose logging: record every command and reply in /tmp/rayonix_detector.log self.verbose_logging = True @property", "this to happen. 
\"\"\" ##t0 = time() # Wait for the readout of", "self.bulb_mode = 0 # Keep track of when the detector was last read.", "\"__main__\": # for testing from pdb import pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self", "def timestamp(): \"\"\"Current date and time as formatted ASCCI text, precise to 1", "from os import makedirs,umask,chmod from os.path import exists from sys import stderr if", "local format to the format used on the MAR CCD compter. e.g. \"//id14bxf/data\"", "before the correction is applied. \"\"\" if not self.is_idle(): self.abort() while not self.is_idle():", "of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire a series", "# bin factor is read back. t = time() while self.get_bin_factor() != n", "gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile) def get_logfile(self): \"\"\"File name for transcript if", "0.3.10), the detector still requires 11 trigger pulses # to aquire 10 images.", "\"\"\"Sends a comman that does not generate a reply\"\"\" from tcp_client import write", "needed, for instance after the server has been restarted or after the bin", "back. t = time() while self.get_bin_factor() != n and time()-t < 3: sleep", "(t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message)) self.log(message) def log(self,message): \"\"\"For non-critical messages. Append the", "\"\"\"Reads the detector and saves the uncorrected image as a file. 
The image", "len(filenames) == 1: return dirname(filenames[0]) for level in range(1,4): dirnames = [] for", "mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0]) except: return 0 def filesize(self,bin_factor): \"\"\"Image file size in", "self.update_bkg() self.write(\"start\") if not wait: return while not self.is_integrating() and self.connected: sleep (0.05)", "in \"idle\" state. if not self.state() == \"idle\": self.abort() while self.state() != \"idle\":", "the same number of pixels as the last read image the correction as", "the CCD and stores the result as background while not self.is_idle(): sleep(0.05) self.last_read", "bin factor: readout_rate = {1: 2, 2: 10, 3: 15, 4: 25, 5:", "from tcp_client import connected return connected(self.ip_address) online = connected def write(self,command): \"\"\"Sends a", "a reply\"\"\" from tcp_client import write write(self.ip_address,command) def query(self,command): \"\"\"Send a command that", "transfer, 2 = bulb mode, 3 = LCLS mode frame_trigger_type = 2 if", "directory, or make sure that the directory is world-writable\"\"\" # This is a", "before starting acquisition. if self.auto_bkg: self.update_bkg() if self.bulb_mode == 0 and not self.ignore_first_trigger:", "not self.is_idle(): sleep(0.05) self.last_read = time() def image_size(self): \"\"\"Width and height of the", "logfile = property(get_logfile) def timestamp(): \"\"\"Current date and time as formatted ASCCI text,", "parts[4:]: path += part+\"/\" path = path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end):", "self.log_error(\"command 'get_state' generated bad reply %r: %s\" % (reply,message)) return 0 # bit", "3: 15, 4: 25, 5: 40, 6: 60, 8: 75, 10: 120} bin_factor", "file is interpreted in file system of the server, not locally. 
\"\"\" self.make_directory(filename)", "set_bin_factor(self,n): if self.bin_factor == n: return if not self.state() == \"idle\": self.abort() while", "out error flags if (status & ~0x0444444F) == 0: return True else: return", "\"Device Database Server\") set to \"/home/marccdsource/servers/marccd_server_socket\", and the third parameter (\"Server Arguments\" or", "Usage example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is started from the MarCCD software", "sleep(0.05) def get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup', 'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\" return", "in progess, execution is delayed until the last readout is finished. This also", "Using remote protocol version 1\"\"\" name = \"rayonix_detector\" from persistent_property import persistent_property ip_address", "sleep(0.05) self.last_read = time() def readout_and_save_raw(self,filename): \"\"\"Reads the detector and saves the uncorrected", "# Make sure there is a valid background image. 
Otherwise, the image #", "user id on the Rayonix control computer, compared # to the beamline control", "filename != None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else: if filename != None: self.write(\"readout,3,\"+remote(filename)) else:", "else: if filename != None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while not self.is_reading(): sleep(0.05) self.last_read", "self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves the last read image without spatial and uniformity correction", "self.trigger_signal_type = \"Opto\" def trigger(self): \"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\") while \"busy\" in self.state():", "path = path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end): pathname += end return", "% (t,message)) self.log(message) def log(self,message): \"\"\"For non-critical messages. Append the message to the", "\"\\n\" t = timestamp() file(self.logfile,\"a\").write(\"%s: %s\" % (t,message)) def get_error_logfile(self): \"\"\"File name error", "messages. Append the message to the transcript, if verbose logging is enabled.\"\"\" if", "+= \"\\n\" t = timestamp() stderr.write(\"%s: %s: %s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" %", "Remote Mode for HS Detector Control Author: <NAME> Date created: 2013-09-20 Date last", "to complete. 
sleep(self.readout_time) self.trigger_signal_type = \"Opto\" def trigger(self): \"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\") while", "= dirname(filename) if directory == \"\": return makedirs(directory) def log_error(self,message): \"\"\"For error messages.", "substracted from every image after readout before the correction is applied.\"\"\" return self.bkg_image_size()", "link complying to the # naming scheme imposed by the 'start_series_triggered' command that", "& ~0x0444444F) == 0: return True else: return False def is_integrating (self): \"tells", "only 0=idle and 8=busy. writefile,<filename>,1 - Save the last read image, no reply", "0 = not triggered, 1= triggered frame transfer, 2 = bulb mode, 3", "to a file no reply readout,1 - reads a new background image, no", "the last read image the correction as saving to file will fail. At", "Try to expand a Windows drive letter to a UNC name. try: import", "\"\"\"name: used for IP address, in case there is more than one detector\"\"\"", "raw data is saved rather than the correct image. 
\"\"\" if filename !=", "image acquiation # 0: the rising edge of the trigger initiates frame transfer/readout", "verbose logging is enabled.\"\"\" if not self.verbose_logging: return if len(message) == 0 or", "last modified: 2018-06-101 \"\"\" __version__ = \"4.0.1\" # default name \"rayonix_detector\" may be", "part in parts[4:]: path += part+\"/\" path = path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if", "or message[-1] != \"\\n\": message += \"\\n\" t = timestamp() file(self.logfile,\"a\").write(\"%s: %s\" %", "a 0.2 s delay until te detectror enters \"integrating\" state, (maybe for the", "tempfile import gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile) def timestamp(): \"\"\"Current date and", "\"Opto\" def trigger(self): \"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\") while \"busy\" in self.state(): sleep(0.05) def", "frame transfer, 2 = bulb mode, 3 = LCLS mode frame_trigger_type = 2", "if (status & ~0x0444444F) == 0: return True else: return False def is_integrating", "if (status & 0x00001000) != 0: t+= [\"correct queued\"] if (status & 0x00002000)", "& 0x00400000) != 0: t+= [\"dezinger error\"] if (status & 0x01000000) != 0:", "create it, if necessary.\"\"\" if filename is None or filename == \"\": return", "import rmtree directory = common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\" try: rmtree(tempdir) except: pass makedirs(tempdir)", "\"\\n\" t = timestamp() stderr.write(\"%s: %s: %s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message))", "the format used on the MAR CCD compter. e.g. 
\"//id14bxf/data\" in Windows maps", "not generate a reply\"\"\" from tcp_client import write write(self.ip_address,command) def query(self,command): \"\"\"Send a", "timestamp() file(self.logfile,\"a\").write(\"%s: %s\" % (t,message)) def get_error_logfile(self): \"\"\"File name error messages.\"\"\" from tempfile", "on the Rayonix computer. # E.g. user id 10660(xppopr) on \"xpp-daq\", versus user", "detector and saves the uncorrected image as a file. The image file is", "for a bug where the detector remaingns in \"reading\" state # forever. <NAME>", "know if the bin factor is changed. If the backgroud image does not", "server = parts[2] ; share = parts[3] path = \"\" for part in", "- Use full readout mode (2048x2048 pixels), no reply (The 1x1 bin mode", "modified: 2018-06-101 \"\"\" __version__ = \"4.0.1\" # default name \"rayonix_detector\" may be overridden", "= allocate_lock() # If this flag is set 'start' automatically reads a background", "wait: The is a 0.2 s delay until te detectror enters \"integrating\" state,", "respect to %r: %s\" % (filenames[i],tempdir,msg)) pathname = filenames[i] try: symlink(pathname,link) except Exception,msg:", "changed. \"\"\" if not self.bkg_valid(): self.read_bkg() def bkg_valid(self): \"\"\"Does detector software have a", "from logging import debug,info,warn,error import socket from time import sleep,time from thread import", "# This is to make the query method multi-thread safe. self.lock = allocate_lock()", "0 # bit 8 and 9 of the state code tell whether the", "# image, when using triggered frame transfer mode. However, (as of # Jul", "logging = False @property def readout_time(self): \"\"\"Estimated readout time in seconds. 
Changes with", "for n in dirnames]): break pathname = filenames[0] for i in range(0,level): pathname", "= [\"/tmp/test_%03d.mccd\" % (i+1) for i in range(0,10)] print('rayonix_detector.ip_address = %r' % rayonix_detector.ip_address)", "0: t+= [\"correct queued\"] if (status & 0x00002000) != 0: t+= [\"correcting\"] if", "self.write(\"readout,1\") # read the CCD and stores the result as background while not", "out error flags if (status & ~0x0444444F) == 0: return \"idle\" t =", "no valid background image (after startup or binning changed). wait: The is a", "readout_rate = {1: 2, 2: 10, 3: 15, 4: 25, 5: 40, 6:", "= self.bin_factor if bin_factor in readout_rate: read_time = 1.0/readout_rate[bin_factor] else: read_time = nan", "\"\"\"Acquire a series of images timed by an external hardware trigger signal. filenames:", "DOS style to UNIX style. pathname = pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") == 0: #", "== 0 and not self.ignore_first_trigger: self.trigger() # Wait for the first (suppressed) image", "dirnames[0] for n in dirnames]): break pathname = filenames[0] for i in range(0,level):", "\"\"\"filenames: list of strings\"\"\" from os.path import dirname if len(filenames) == 0: return", "file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message)) self.log(message) def log(self,message): \"\"\"For non-critical messages. Append the message", "software running # under a different user id on the Rayonix control computer,", "with TCP port number 2222. 
Usage example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is", "version 1\"\"\" name = \"rayonix_detector\" from persistent_property import persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger", "and reply in /tmp/rayonix_detector.log self.verbose_logging = True @property def connected(self): from tcp_client import", "0: t+= [\"integrating\"] if (status & 0x00000040) != 0: t+= [\"integrate error\"] if", "Exception,msg: error(\"Cannot create of %r to %r: %s\" % (pathname,link,msg)) if not exists(dirname(filenames[i])):", "if verbose logging is enabled.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile =", "Reference: Rayonix HS detector manual 0.3e Chapter 9: The Legacy Remote Mode for", "of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves the last", "clearing. In case the CCD readout is in progess, execution is delayed until", "port number 2222. Usage example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is started from", "0x04000000) != 0: t+= [\"series error\"] state = \",\".join(t) return state def start(self,wait=True):", "not allow a list of filenames # to be specified, but uses auto-generated", "seconds. Changes with 'bin_factor'.\"\"\" safetyFactor = 1 from numpy import nan # Readout", "the detector remaingns in \"reading\" state # forever. <NAME> 27 Mar 2014 ##if", "command that # point ot the real filenames. When the rayonix softawre tries", "= time() return status def is_idle (self): try: status = self.state_code() except: return", "pixels of current the background image, e.g. 
\"2048,2048\" Reference: Rayonix HS detector manual", "Remote Control control panel with the second parameter (\"Server command\" or \"Device Database", "there is no valid background image (after startup or binning changed). wait: The", "11 trigger pulses # to aquire 10 images. # Workaround: Software-trigger the detector", "the backgound image, which is substracted from every image after readout before the", "write(self.ip_address,command) def query(self,command): \"\"\"Send a command that generates a reply. Return the reply\"\"\"", "image, no reply set_bin,8,8 - Use 512x512-pixel bin mode, no reply set_bin,2,2 -", "or \"Device Database Server\") set to \"/home/marccdsource/servers/marccd_server_socket\", and the third parameter (\"Server Arguments\"", "= True @property def connected(self): from tcp_client import connected return connected(self.ip_address) online =", "t = time() while self.get_bin_factor() != n and time()-t < 3: sleep (0.1)", "image is empty and this value is 0. \"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except:", "is large compared to the pixel size) get_bin - reply is two integer", "0: # //server/share/directory/file parts = pathname.split(\"/\") if len(parts) >= 4: server = parts[2]", "images. 
self.save_raw = False # For triggred image acquiation # 0: the rising", "\"\"\"Acquire a series of images timed by an exteranal hardware trigger signal filename_base:", "try: status = self.state_code() except: return \"\" # bit mask 0x00444440 masks out", "%r with respect to %r: %s\" % (filenames[i],tempdir,msg)) pathname = filenames[i] try: symlink(pathname,link)", "workaround for promblem caused by the Rayonix software running # under a different", "from DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series of images", "first image, which is a bad # image, when using triggered frame transfer", "end = \"/\" if pathname.endswith(\"/\") else \"\" # Try to expand a Windows", "!= \"\\n\": message += \"\\n\" t = timestamp() stderr.write(\"%s: %s: %s\" % (t,self.ip_address,message))", "after the bin factor has been changed. \"\"\" if not self.bkg_valid(): self.read_bkg() def", "not exists(pathname): umask(0000) try: makedirs(pathname) except Exception,details: stderr.write(\"makedirs: %r: %r\" % (pathname,details)) def", "rayonix_detector = Rayonix_Detector() if __name__ == \"__main__\": # for testing from pdb import", "0=idle, 1=queued, 2=executing, 4=error The exception is the 'state' field, which has only", "socket from time import sleep,time from thread import allocate_lock class Rayonix_Detector(object): \"\"\"This is", "sure that the directory of teh given filename exists by create it, if", "from os.path import dirname directory = dirname(filename) if directory == \"\": return makedirs(directory)", "pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") == 0: # //server/share/directory/file parts = pathname.split(\"/\") if len(parts) >=", "import dirname directory = dirname(filename) if directory == \"\": return makedirs(directory) def log_error(self,message):", "range(0,level): pathname = dirname(pathname) return 
pathname rayonix_detector = Rayonix_Detector() if __name__ == \"__main__\":", "Whether to save corrected or raw images. self.save_raw = False # For triggred", "background image before starting acquisition. if self.auto_bkg: self.update_bkg() if self.bulb_mode == 0 and", "string: idle,integating,reading,writing\"\"\" try: status = self.state_code() except: return \"\" # bit mask 0x00444440", "of \"correct\" # is either \"queued\" or \"executing\" return ((self.state_code() & 0x00003000) !=", "status = self.state_code() except: return True # bit mask 0x00444440 masks out error", "filename sequence number, e.g. 6 for 'test000001.rx'\"\"\" # Make sure the directory to", "chmod(pathname,0777) except Exception,details: stderr.write(\"chmod: %r: %r\" % (pathname,details)) if not exists(pathname): umask(0000) try:", "the CCD readout is in progess, execution is delayed until the last readout", "progess, execution is delayed until the last readout is finished. This also acquires", "'get_state' generated bad reply %r: %s\" % (reply,message)) return 0 # bit 8", "on \"xpp-daq\", versus user id 500(hsuser) # on \"con-ics-xpp-rayonix\" from os import makedirs,umask,chmod", "currently being read out\" # bit 8 and 9 of the state code", "if not self.is_idle(): self.abort() while not self.is_idle(): sleep(0.05) self.write(\"readout,1\") # read the CCD", "HS Detector Control Author: <NAME> Date created: 2013-09-20 Date last modified: 2018-06-101 \"\"\"", "not self.is_reading(): sleep(0.05) self.last_read = time() def readout_and_save_raw(self,filename): \"\"\"Reads the detector and saves", "verbose logging is enabled. Change when problem solved. logging = False @property def", "if verbose logging is enabled.\"\"\" if not self.verbose_logging: return if len(message) == 0", "mode by stopping the continuous clearing. In case the CCD readout is in", "as a pipelined operation. The function returns immediately. 
The pathname of the file", "% (i+1) for i in range(0,10)] print('rayonix_detector.ip_address = %r' % rayonix_detector.ip_address) print('') print('rayonix_detector.bin_factor')", "correct image. \"\"\" if filename != None: self.make_directory(filename) if not self.save_raw: if filename", "save_image(self,filename): \"\"\"Saves the last read image to a file. The pathname of the", "if self.bulb_mode == 0 and not self.ignore_first_trigger: # The detector software does not", "with the following meaning: 0=idle, 1=queued, 2=executing, 4=error The exception is the 'state'", "to the # naming scheme imposed by the 'start_series_triggered' command that # point", "9: The Legacy Remote Mode for HS Detector Control Author: <NAME> Date created:", "the transcript.\"\"\" from sys import stderr if len(message) == 0 or message[-1] !=", "0x00001000) != 0: t+= [\"correct queued\"] if (status & 0x00002000) != 0: t+=", "t+= [\"unavailable\"] if (status & 0x0000000F) == 7: t+= [\"error\"] if (status &", "file server from the local format to the format used on the MAR", "either \"queued\" or \"executing\" return ((self.state_code() & 0x00000300) != 0) def is_correcting (self):", "+= [pathname] if all([n == dirnames[0] for n in dirnames]): break pathname =", "!= 0: t+= [\"correct queued\"] if (status & 0x00002000) != 0: t+= [\"correcting\"]", "at the current bin mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0]) except: return 0 def filesize(self,bin_factor):", "Legacy Remote Mode for HS Detector Control Author: <NAME> Date created: 2013-09-20 Date", "(maybe for the clearing to stop?) When wait=False, do no wait for this", "rayonix_detector # for debugging filenames = [\"/tmp/test_%03d.mccd\" % (i+1) for i in range(0,10)]", "\"2,2\" get_size_bkg - reply is the number of pixels of current the background", "if (status & 0x00000300) != 0: self.last_read = time() return status def is_idle", "redirects is to create an image with # the specified name. 
from os.path", "ignore an \"acquire_images_triggered\" command if not # in \"idle\" state. if not self.state()", "= str(datetime.now()) return timestamp[:-3] # omit microsconds def remote(pathname): \"\"\"This converts the pathname", "1=queued, 2=executing, 4=error The exception is the 'state' field, which has only 0=idle", "to remote control the MAR CCD detector Using remote protocol version 1\"\"\" name", "if (status & 0x00002000) != 0: t+= [\"correcting\"] if (status & 0x00004000) !=", "the dot (.) number_field_width: number of digits for the filename sequence number, e.g.", "# under a different user id on the Rayonix control computer, compared #", "tempdir+\"/%06d.rx\" % (i+1) if islink(link) or exists(link): remove(link) try: pathname = relpath(filenames[i],tempdir) except", "not be writable on the Rayonix computer. # E.g. user id 10660(xppopr) on", "case there is more than one detector\"\"\" if name is not None: self.name", "= pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") == 0: # //server/share/directory/file parts = pathname.split(\"/\") if len(parts)", "file system of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read = time()", "get_bin - reply is two integer numbers, e.g. \"2,2\" get_size_bkg - reply is", "frame transfer/readout # 1: rising edge starts acquisition, # falling edge initiates frame", "%r: %s\" % (filenames[i],tempdir,msg)) pathname = filenames[i] try: symlink(pathname,link) except Exception,msg: error(\"Cannot create", "it to the error log file. If verbose logging is enabled, it is", "style to UNIX style. pathname = pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") == 0: # //server/share/directory/file", "not clearing)\" # \"acquire\" field is \"executing\" if not self.connected: return True return", "# Make sure the directory to write the image to exists. 
from os.path", "# The \"start_series_triggered\" command does not allow a list of filenames # to", "2.0: self.abort() # Make sure there is a valid background image. Otherwise, the", "background image is empty and this value is 0. \"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0])", "of %r with respect to %r: %s\" % (filenames[i],tempdir,msg)) pathname = filenames[i] try:", "\"idle\" t = [] if (status & 0x0000000F) == 6: t+= [\"unavailable\"] if", "At startup, the background image is empty and this value is 0. \"\"\"", "triggered frame transfer mode. However, (as of # Jul 2014, version 0.3.10), the", "%r: %r\" % (pathname,details)) if not exists(pathname): umask(0000) try: makedirs(pathname) except Exception,details: stderr.write(\"makedirs:", "a bin factor change it takes about 2 s before the new #", "generates a reply. Return the reply\"\"\" self.log(\"query %r\" % command) from tcp_client import", "the correction is applied.\"\"\" return self.bkg_image_size() == self.image_size() # By default verbose logging", "def save_image(self,filename): \"\"\"Saves the last read image to a file. The pathname of", "the following meaning: 0=idle, 1=queued, 2=executing, 4=error The exception is the 'state' field,", "either \"queued\" or \"executing\" if (status & 0x00000300) != 0: self.last_read = time()", "sleep(0.05) # Need a valid background image before starting acquisition. if self.auto_bkg: self.update_bkg()", "directory is world-writable\"\"\" # This is a workaround for promblem caused by the", "readout time in seconds. 
Changes with 'bin_factor'.\"\"\" safetyFactor = 1 from numpy import", "%s\" % (t,message)) def get_error_logfile(self): \"\"\"File name error messages.\"\"\" from tempfile import gettempdir", "##t0 = time() # Wait for the readout of the previous image to", "server understand the following commands: start - Puts the CCD to integration mode,", "readout mode (2048x2048 pixels), no reply (The 1x1 bin mode with 4096x4096 pixels", "not self.ignore_first_trigger: # The detector software does not save to first image, which", "reply readout,1 - reads a new background image, no reply get_state - reply", "no reply set_bin,8,8 - Use 512x512-pixel bin mode, no reply set_bin,2,2 - Use", "locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire a series of images timed by", "\"\"\"Reads a fresh the backgound image, which is substracted from every image after", "commands: start - Puts the CCD to integration mode, no reply readout,0,filename -", "safe. self.lock = allocate_lock() # If this flag is set 'start' automatically reads", "Mode for HS Detector Control Author: <NAME> Date created: 2013-09-20 Date last modified:", "get_size_bkg - reply is the number of pixels of current the background image,", "absolute pathnames. 
Directory part must be valid pathname on file system of the", "command and reply in /tmp/rayonix_detector.log self.verbose_logging = True @property def connected(self): from tcp_client", "Inverted','CMOS Pulldown','CMOS Pullup', 'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\"", "n in dirnames]): break pathname = filenames[0] for i in range(0,level): pathname =", "network file server from the local format to the format used on the", "if (status & 0x00000010) != 0: t+= [\"integrate queued\"] if (status & 0x00000020)", "(self): \"tells whether the chip is integrating mode (not reading, not clearing)\" #", "%(message)s\") self = rayonix_detector # for debugging filenames = [\"/tmp/test_%03d.mccd\" % (i+1) for", "= time() def readout_and_save_raw(self,filename): \"\"\"Reads the detector and saves the uncorrected image as", "teh given filename exists by create it, if necessary.\"\"\" if filename is None", "t+= [\"dezingering\"] if (status & 0x00400000) != 0: t+= [\"dezinger error\"] if (status", "self.write(\"readout,0\") else: if filename != None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while not self.is_reading(): sleep(0.05)", "transfer/readout self.bulb_mode = 0 # Keep track of when the detector was last", "not self.verbose_logging: return if len(message) == 0 or message[-1] != \"\\n\": message +=", "= \",\".join(t) return state def start(self,wait=True): \"\"\"Puts the detector into integration mode by", "else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() != \"acquiring series\": sleep(0.05) if self.bulb_mode", "\"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". 
pathname = pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname) except: pass # Convert", "not None: self.name = name self.timeout = 1.0 # This is to make", "false), the image raw data is saved rather than the correct image. \"\"\"", "pathname = pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") == 0: # //server/share/directory/file parts = pathname.split(\"/\") if", "\"rayonix_detector\" from persistent_property import persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None):", "or filename == \"\": return from os.path import dirname directory = dirname(filename) if", "queued\"] if (status & 0x00002000) != 0: t+= [\"correcting\"] if (status & 0x00004000)", "trigger signal filename_base: Directory part must be valid pathname on file system of", "== 0 or message[-1] != \"\\n\": message += \"\\n\" t = timestamp() stderr.write(\"%s:", "4096 image_nbytes = 2*image_size**2 filesize = headersize+image_nbytes return filesize def bkg_image_size(self): # does", "[\"correcting\"] if (status & 0x00004000) != 0: t+= [\"correct error\"] if (status &", "is interpreted in file system of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\")", "a background image # if there is not valid backgournd image. 
self.auto_bkg =", "return from os.path import dirname directory = dirname(filename) if directory == \"\": return", "2,4,8,16\"\"\" image_size = 7680/bin_factor # MS340HS headersize = 4096 image_nbytes = 2*image_size**2 filesize", "= nan return read_time*safetyFactor def make_directory(self,filename): \"\"\"Make sure that the directory of teh", "def write(self,command): \"\"\"Sends a comman that does not generate a reply\"\"\" from tcp_client", "makedirs(tempdir) for i in range(0,len(filenames)): link = tempdir+\"/%06d.rx\" % (i+1) if islink(link) or", "(i+1) for i in range(0,10)] print('rayonix_detector.ip_address = %r' % rayonix_detector.ip_address) print('') print('rayonix_detector.bin_factor') print('rayonix_detector.acquire_images_triggered(filenames)')", "0 # Keep track of when the detector was last read. self.last_read =", "substracted from every image after readout before the correction is applied. \"\"\" if", "signal. filenames: list of absolute pathnames. Directory part must be valid pathname on", "\"executing\" return ((self.state_code() & 0x00000300) != 0) def is_correcting (self): \"tells whether the", "transcript, if verbose logging is enabled.\"\"\" if not self.verbose_logging: return if len(message) ==", "edge initiates frame transfer/readout self.bulb_mode = 0 # Keep track of when the", "image with # the specified name. from os.path import dirname,relpath,islink,exists from os import", "end return pathname def makedirs(pathname): \"\"\"Create a directory, or make sure that the", "@property def readout_time(self): \"\"\"Estimated readout time in seconds. Changes with 'bin_factor'.\"\"\" safetyFactor =", "return access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames: list of strings\"\"\" from os.path import dirname if", "used on the MAR CCD compter. e.g. 
\"//id14bxf/data\" in Windows maps to \"/net/id14bxf/data\"", "except Exception,details: stderr.write(\"chmod: %r: %r\" % (pathname,details)) if not exists(pathname): umask(0000) try: makedirs(pathname)", "if not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) #", "the MarCCD software from the Remote Control control panel with the second parameter", "in file system of the server, not locally. If 'save_raw' is true (default:", "versus user id 500(hsuser) # on \"con-ics-xpp-rayonix\" from os import makedirs,umask,chmod from os.path", "error messages.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile) def get_logfile(self):", "detector still requires 11 trigger pulses # to aquire 10 images. # Workaround:", "Exception,details: stderr.write(\"makedirs: %r: %r\" % (pathname,details)) def iswritable(pathname): \"\"\"Is file or folder writable?\"\"\"", "except Exception,msg: error(\"Cannot create of %r to %r: %s\" % (pathname,link,msg)) if not", "it to a file no reply readout,1 - reads a new background image,", "image in pixels. This value is important to know if the bin factor", "about 2 s before the new # bin factor is read back. t", "7680/bin_factor # MS340HS headersize = 4096 image_nbytes = 2*image_size**2 filesize = headersize+image_nbytes return", "with_statement \"\"\" Remote control of the MAR CCD detector, using <NAME>'s sample remote", "necessary.\"\"\" if filename is None or filename == \"\": return from os.path import", "sleep(self.readout_time) self.trigger_signal_type = \"Opto\" def trigger(self): \"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\") while \"busy\" in", "12-15: correct bits 16-19: write bits 20-23: dezinger Each filed contains a 4-bit", "pulses # to aquire 10 images. 
# Workaround: Software-trigger the detector once after", "The detector software does not save to first image, which is a bad", "the image to exists. from os.path import dirname directory = dirname(filename_base) makedirs(directory) filename_base", "None: self.name = name self.timeout = 1.0 # This is to make the", "current background image in pixels. This value is important to know if the", "self.last_read = time() def readout_and_save_raw(self,filename): \"\"\"Reads the detector and saves the uncorrected image", "2222. Usage example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is started from the MarCCD", "0 try: status = int(eval(reply)) except Exception,message: self.log_error(\"command 'get_state' generated bad reply %r:", "queued\"] if (status & 0x00020000) != 0: t+= [\"writing\"] if (status & 0x00040000)", "exists. from os.path import dirname directory = dirname(filename_base) makedirs(directory) filename_base = remote(filename_base) #", "0x00000200) != 0: t+= [\"reading\"] if (status & 0x00000400) != 0: t+= [\"read", "return while not self.is_integrating() and self.connected: sleep (0.05) def abort(self): \"\"\"Cancel series acquiation", "function of bin factor: readout_rate = {1: 2, 2: 10, 3: 15, 4:", "t+= [\"integrating\"] if (status & 0x00000040) != 0: t+= [\"integrate error\"] if (status", "reply set_bin,2,2 - Use full readout mode (2048x2048 pixels), no reply (The 1x1", "mode (not reading, not clearing)\" # \"acquire\" field is \"executing\" if not self.connected:", "message and append it to the error log file. 
If verbose logging is", "import dirname if len(filenames) == 0: return [] if len(filenames) == 1: return", "last read image, no reply set_bin,8,8 - Use 512x512-pixel bin mode, no reply", "generated bad reply %r: %s\" % (reply,message)) return 0 # bit 8 and", "without correcting and displaying the image.\" self.write(\"readout,3\") self.last_read = time() def save_image(self,filename): \"\"\"Saves", "(as of # Jul 2014, version 0.3.10), the detector still requires 11 trigger", "of the MAR CCD detector, using <NAME>'s sample remote control server program \"marccd_server_socket\"", "self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read = time() def readout_raw(self): \"Reads the detector out without correcting", "0x00004000) != 0: t+= [\"correct error\"] if (status & 0x00010000) != 0: t+=", "= dirname(pathname) return pathname rayonix_detector = Rayonix_Detector() if __name__ == \"__main__\": # for", "# if there is not valid backgournd image. self.auto_bkg = True # Whether", "if len(filenames) == 1: return dirname(filenames[0]) for level in range(1,4): dirnames = []", "if the bin factor is changed. If the backgroud image does not have", "be writable on the Rayonix computer. # E.g. user id 10660(xppopr) on \"xpp-daq\",", "bits 16-19: write bits 20-23: dezinger Each filed contains a 4-bit code, with", "0: return True else: return False def is_integrating (self): \"tells whether the chip", "get_error_logfile(self): \"\"\"File name error messages.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile =", "in parts[4:]: path += part+\"/\" path = path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if not", "0: the rising edge of the trigger initiates frame transfer/readout # 1: rising", "manual 0.3e Chapter 9: The Legacy Remote Mode for HS Detector Control Author:", "detector into integration mode by stopping the continuous clearing. 
In case the CCD", "version 0.3.10), the detector still requires 11 trigger pulses # to aquire 10", "'state' field, which has only 0=idle and 8=busy. writefile,<filename>,1 - Save the last", "as integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\") if reply == \"\": return 0 try: status", "and time as formatted ASCCI text, precise to 1 ms\"\"\" from datetime import", "= self.state_code() except: return \"\" # bit mask 0x00444440 masks out error flags", "verbose logging is enabled, it is also added to the transcript.\"\"\" from sys", "or exists(link): remove(link) try: pathname = relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative path of %r", "it is also added to the transcript.\"\"\" from sys import stderr if len(message)", "the state code tell whether the task status of \"read\" # is either", "# for testing from pdb import pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self =", "no reply readout,1 - reads a new background image, no reply get_state -", "rate in frames per second as function of bin factor: readout_rate = {1:", "timestamp[:-3] # omit microsconds def remote(pathname): \"\"\"This converts the pathname of a file", "0: t+= [\"dezinger error\"] if (status & 0x01000000) != 0: t+= [\"series queued\"]", "delay until te detectror enters \"integrating\" state, (maybe for the clearing to stop?)", "until te detectror enters \"integrating\" state, (maybe for the clearing to stop?) When", "\"\"\" if not self.bkg_valid(): self.read_bkg() def bkg_valid(self): \"\"\"Does detector software have a the", "(\"Server command\" or \"Device Database Server\") set to \"/home/marccdsource/servers/marccd_server_socket\", and the third parameter", "is applied. 
\"\"\" if not self.is_idle(): self.abort() while not self.is_idle(): sleep(0.05) self.write(\"readout,1\") #", "of image files for other applications from DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base,", "def trigger(self): \"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\") while \"busy\" in self.state(): sleep(0.05) def get_trigger_signal_type(self):", "0 or message[-1] != \"\\n\": message += \"\\n\" t = timestamp() file(self.logfile,\"a\").write(\"%s: %s\"", "by the 'start_series_triggered' command that # point ot the real filenames. When the", "~0x0444444F) == 0: return \"idle\" t = [] if (status & 0x0000000F) ==", "in self.state(): sleep(0.05) def get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup', 'CMOS Pulldown Inerted','CMOS Pullup", "(status & ~0x0444444F) == 0: return True else: return False def is_integrating (self):", "from numpy import nan # Readout rate in frames per second as function", "detector\"\"\" if name is not None: self.name = name self.timeout = 1.0 #", "on the MAR CCD compter. e.g. \"//id14bxf/data\" in Windows maps to \"/net/id14bxf/data\" on", "makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location of image files for other applications from DB", "share = parts[3] path = \"\" for part in parts[4:]: path += part+\"/\"", "\",\".join(t) return state def start(self,wait=True): \"\"\"Puts the detector into integration mode by stopping", "def bkg_image_size(self): # does not work with protocol v1 (timeout) \"\"\"Width and height", "\"\"\"Saves the last read image to a file. The pathname of the file", "is read back. t = time() while self.get_bin_factor() != n and time()-t <", "separators from DOS style to UNIX style. 
pathname = pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") ==", "- reply is integer number containing 6 4-bit fields bits 0-3: state: 0=idle,8=busy", "get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0]) except: return def set_bin_factor(self,n): if self.bin_factor == n: return", "if exists(pathname) and not iswritable(pathname): try: chmod(pathname,0777) except Exception,details: stderr.write(\"chmod: %r: %r\" %", "As a work-araound generated a series of symbilic link complying to the #", "and displaying the image.\" self.write(\"readout,3\") self.last_read = time() def save_image(self,filename): \"\"\"Saves the last", "v1 (timeout) \"\"\"Width and height of the current background image in pixels. This", "readout,1 - reads a new background image, no reply get_state - reply is", "# Save location of image files for other applications from DB import dbput", "10 images. # Workaround: Software-trigger the detector once after starting a series. self.trigger_signal_type", "% (filenames[i],tempdir,msg)) pathname = filenames[i] try: symlink(pathname,link) except Exception,msg: error(\"Cannot create of %r", "is more than one detector\"\"\" if name is not None: self.name = name", "def get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup', 'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\")", "2 s before the new # bin factor is read back. t =", "safetyFactor = 1 from numpy import nan # Readout rate in frames per", "\"\"\"File name for transcript if verbose logging is enabled.\"\"\" from tempfile import gettempdir", "# Whether to save corrected or raw images. self.save_raw = False # For", "mode, no reply readout,0,filename - Reads out the detector, corrects the image and", "write the image to exists. from os.path import dirname directory = dirname(filename_base) makedirs(directory)", "image readout to complete. 
sleep(self.readout_time) self.trigger_signal_type = \"Opto\" def trigger(self): \"\"\"Software-trigger the detector\"\"\"", "((self.state_code() & 0x00003000) != 0) def state(self): \"\"\"Status information as string: idle,integating,reading,writing\"\"\" try:", "that the directory of teh given filename exists by create it, if necessary.\"\"\"", "whether the chip is integrating mode (not reading, not clearing)\" # \"acquire\" field", "self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire a series of images timed by an external hardware", "pathname = filenames[i] try: symlink(pathname,link) except Exception,msg: error(\"Cannot create of %r to %r:", "& 0x00000020) != 0: t+= [\"integrating\"] if (status & 0x00000040) != 0: t+=", "will fail. if self.auto_bkg: self.update_bkg() self.write(\"start\") if not wait: return while not self.is_integrating()", "when the detector was last read. self.last_read = 0.0 # Verbose logging: record", "(status & ~0x0444444F) == 0: return \"idle\" t = [] if (status &", "no reply (The 1x1 bin mode with 4096x4096 pixels is not used, because", "error\"] if (status & 0x01000000) != 0: t+= [\"series queued\"] if (status &", "Date last modified: 2018-06-101 \"\"\" __version__ = \"4.0.1\" # default name \"rayonix_detector\" may", "return if len(message) == 0 or message[-1] != \"\\n\": message += \"\\n\" t", "t+= [\"dezinger error\"] if (status & 0x01000000) != 0: t+= [\"series queued\"] if", "previous image to finish. while self.is_reading(): sleep(0.05) # Work-around for a bug where", "if self.auto_bkg: self.update_bkg() if self.bulb_mode == 0 and not self.ignore_first_trigger: # The detector", "return int(self.query(\"get_size\").split(\",\")[0]) except: return 0 def filesize(self,bin_factor): \"\"\"Image file size in bytes including", "file system of the server, not locally. 
\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire", "a the backgound image for the current bin mode, which is substracted from", "\"\\n\": message += \"\\n\" t = timestamp() file(self.logfile,\"a\").write(\"%s: %s\" % (t,message)) def get_error_logfile(self):", "detector Using remote protocol version 1\"\"\" name = \"rayonix_detector\" from persistent_property import persistent_property", "state = \",\".join(t) return state def start(self,wait=True): \"\"\"Puts the detector into integration mode", "the MAR CCD detector, using <NAME>'s sample remote control server program \"marccd_server_socket\" with", "__version__ = \"4.0.1\" # default name \"rayonix_detector\" may be overridden in subclass from", "return int(self.query(\"get_bin\").split(\",\")[0]) except: return def set_bin_factor(self,n): if self.bin_factor == n: return if not", "import gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile) def timestamp(): \"\"\"Current date and time", "which is a bad # image, when using triggered frame transfer mode. However,", "understand the following commands: start - Puts the CCD to integration mode, no", "Database Server\") set to \"/home/marccdsource/servers/marccd_server_socket\", and the third parameter (\"Server Arguments\" or \"Personal", "continuous clearing. 
In case the CCD readout is in progess, execution is delayed", "the Rayonix computer\"\"\" # The detector will ignore an \"acquire_images_triggered\" command if not", "makedirs(pathname) except Exception,details: stderr.write(\"makedirs: %r: %r\" % (pathname,details)) def iswritable(pathname): \"\"\"Is file or", "[\"writing\"] if (status & 0x00040000) != 0: t+= [\"write error\"] if (status &", "on a network file server from the local format to the format used", "current bin mode, which is substracted from every image after readout before the", "is_reading (self): \"tells whether the chip is currently being read out\" # bit", "been restarted or after the bin factor has been changed. \"\"\" if not", "parts[2] ; share = parts[3] path = \"\" for part in parts[4:]: path", "size) get_bin - reply is two integer numbers, e.g. \"2,2\" get_size_bkg - reply", "<NAME> Date created: 2013-09-20 Date last modified: 2018-06-101 \"\"\" __version__ = \"4.0.1\" #", "\"\"\"This is to remote control the MAR CCD detector Using remote protocol version", "starting a series. self.trigger_signal_type = \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width #", "style. pathname = pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") == 0: # //server/share/directory/file parts = pathname.split(\"/\")", "pathname end = \"/\" if pathname.endswith(\"/\") else \"\" # Try to expand a", "self.last_read = time() def save_image(self,filename): \"\"\"Saves the last read image to a file.", "# Wait for the readout of the previous image to finish. while self.is_reading():", "the message and append it to the error log file. If verbose logging", "the 'state' field, which has only 0=idle and 8=busy. 
writefile,<filename>,1 - Save the", "try: return int(self.query(\"get_bin\").split(\",\")[0]) except: return def set_bin_factor(self,n): if self.bin_factor == n: return if", "def make_directory(self,filename): \"\"\"Make sure that the directory of teh given filename exists by", "self.state() != \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a bin factor change it takes", "get_state - reply is integer number containing 6 4-bit fields bits 0-3: state:", "bin factor change it takes about 2 s before the new # bin", "Return the reply\"\"\" self.log(\"query %r\" % command) from tcp_client import query return query(self.ip_address,command)", "starting acquisition. if self.auto_bkg: self.update_bkg() if self.bulb_mode == 0 and not self.ignore_first_trigger: #", "else: self.write(\"readout,3\") ##while not self.is_reading(): sleep(0.05) self.last_read = time() def readout_and_save_raw(self,filename): \"\"\"Reads the", "of absolute pathnames. Directory part must be valid pathname on file system of", "self.state(): sleep(0.05) def get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup', 'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\"", "return \"\" # bit mask 0x00444440 masks out error flags if (status &", "on Unix\"\"\" if not pathname: return pathname end = \"/\" if pathname.endswith(\"/\") else", "& 0x04000000) != 0: t+= [\"series error\"] state = \",\".join(t) return state def", "that # point ot the real filenames. 
When the rayonix softawre tries to", "4096x4096 pixels is not used, because the point-spread function of the fiber optic", "if not self.connected: return True return ((self.state_code() & 0x00000020) != 0) def is_reading", "Rayonix computer\"\"\" # The detector will ignore an \"acquire_images_triggered\" command if not #", "!= None: self.make_directory(filename) if not self.save_raw: if filename != None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\")", "in /tmp/rayonix_detector.log self.verbose_logging = True @property def connected(self): from tcp_client import connected return", "%r: %r\" % (pathname,details)) def iswritable(pathname): \"\"\"Is file or folder writable?\"\"\" from os", "n and time()-t < 3: sleep (0.1) bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout X and", "= parts[3] path = \"\" for part in parts[4:]: path += part+\"/\" path", "Need a valid background image before starting acquisition. if self.auto_bkg: self.update_bkg() if self.bulb_mode", "10, 3: 15, 4: 25, 5: 40, 6: 60, 8: 75, 10: 120}", "\"/net/id14bxf/data\" on Unix\"\"\" if not pathname: return pathname end = \"/\" if pathname.endswith(\"/\")", "the clearing to stop?) 
When wait=False, do no wait for this to happen.", "image for the current bin mode, which is substracted from every image after", "//server/share/directory/file parts = pathname.split(\"/\") if len(parts) >= 4: server = parts[2] ; share", "taper is large compared to the pixel size) get_bin - reply is two", "of \"read\" # is either \"queued\" or \"executing\" if (status & 0x00000300) !=", "queued\"] if (status & 0x00000020) != 0: t+= [\"integrating\"] if (status & 0x00000040)", "for i in range(0,level): pathname = dirname(pathname) return pathname rayonix_detector = Rayonix_Detector() if", "out without correcting and displaying the image.\" self.write(\"readout,3\") self.last_read = time() def save_image(self,filename):", "islink(link) or exists(link): remove(link) try: pathname = relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative path of", "exists(pathname): umask(0000) try: makedirs(pathname) except Exception,details: stderr.write(\"makedirs: %r: %r\" % (pathname,details)) def iswritable(pathname):", "used, because the point-spread function of the fiber optic taper is large compared", "is written in background as a pipelined operation. The function returns immediately. The", "a directory, or make sure that the directory is world-writable\"\"\" # This is", "os.path import exists from sys import stderr if exists(pathname) and not iswritable(pathname): try:", "change it takes about 2 s before the new # bin factor is", "factor has been changed. \"\"\" if not self.bkg_valid(): self.read_bkg() def bkg_valid(self): \"\"\"Does detector", "is a bad # image, when using triggered frame transfer mode. However, (as", "to the beamline control computer, so directories created via NFS on the #", "# the specified name. 
from os.path import dirname,relpath,islink,exists from os import symlink,remove from", "# As a work-araound generated a series of symbilic link complying to the", "added to the transcript.\"\"\" from sys import stderr if len(message) == 0 or", "i in range(0,level): pathname = dirname(pathname) dirnames += [pathname] if all([n == dirnames[0]", "= 4096 image_nbytes = 2*image_size**2 filesize = headersize+image_nbytes return filesize def bkg_image_size(self): #", "120} bin_factor = self.bin_factor if bin_factor in readout_rate: read_time = 1.0/readout_rate[bin_factor] else: read_time", "\"\"\"Current date and time as formatted ASCCI text, precise to 1 ms\"\"\" from", "for the readout of the previous image to finish. while self.is_reading(): sleep(0.05) #", "Unix\"\"\" if not pathname: return pathname end = \"/\" if pathname.endswith(\"/\") else \"\"", "correction as saving to file will fail. At startup, the background image is", "third parameter (\"Server Arguments\" or \"Personal Name\") set to \"2222\". Or, alternatively from", "reply is two integer numbers, e.g. \"2,2\" get_size_bkg - reply is the number", "ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None): \"\"\"name: used for IP address, in case there", "to aquire 10 images. # Workaround: Software-trigger the detector once after starting a", "return pathname rayonix_detector = Rayonix_Detector() if __name__ == \"__main__\": # for testing from", "detector\"\"\" self.write(\"trigger,0.001\") while \"busy\" in self.state(): sleep(0.05) def get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup',", "command) from tcp_client import query return query(self.ip_address,command) def state_code(self): \"\"\"Status information as integer\"\"\"", "the server, not locally. 
\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire a series of", "!= \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a bin factor change it takes about", "t+= [\"integrate error\"] if (status & 0x00000100) != 0: t+= [\"read queued\"] if", "!= 0: t+= [\"correcting\"] if (status & 0x00004000) != 0: t+= [\"correct error\"]", "& 0x00001000) != 0: t+= [\"correct queued\"] if (status & 0x00002000) != 0:", "bin_factor = self.bin_factor if bin_factor in readout_rate: read_time = 1.0/readout_rate[bin_factor] else: read_time =", "0 def update_bkg(self): \"\"\"Updates the backgound image if needed, for instance after the", "from tcp_client import write write(self.ip_address,command) def query(self,command): \"\"\"Send a command that generates a", "Rayonix computer. # E.g. user id 10660(xppopr) on \"xpp-daq\", versus user id 500(hsuser)", "if self.bin_factor == n: return if not self.state() == \"idle\": self.abort() while self.state()", "if necessary.\"\"\" if filename is None or filename == \"\": return from os.path", "pathname = relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative path of %r with respect to %r:", "common_topdir(filenames): \"\"\"filenames: list of strings\"\"\" from os.path import dirname if len(filenames) == 0:", "must be valid pathname on file system of the Rayonix computer\"\"\" # The", "frame_trigger_type = 2 if self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() !=", "given, the image is saved as a file. The image file is written", "the current background image in pixels. This value is important to know if", "if directory == \"\": return makedirs(directory) def log_error(self,message): \"\"\"For error messages. 
Display the", "of images timed by an exteranal hardware trigger signal filename_base: Directory part must", "current the background image, e.g. \"2048,2048\" Reference: Rayonix HS detector manual 0.3e Chapter", "None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else: if filename != None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while", "[\"write error\"] if (status & 0x00100000) != 0: t+= [\"dezinger queued\"] if (status", "while self.state() != \"idle\": sleep(0.05) # Need a valid background image before starting", "Save the last read image, no reply set_bin,8,8 - Use 512x512-pixel bin mode,", "remaingns in \"reading\" state # forever. <NAME> 27 Mar 2014 ##if time()-t0 >", "~0x0444444F) == 0: return True else: return False def is_integrating (self): \"tells whether", "background image # if there is not valid backgournd image. self.auto_bkg = True", "panel with the second parameter (\"Server command\" or \"Device Database Server\") set to", "0x00444440 masks out error flags if (status & ~0x0444444F) == 0: return True", "bin_facor: 2,4,8,16\"\"\" image_size = 7680/bin_factor # MS340HS headersize = 4096 image_nbytes = 2*image_size**2", "if needed, for instance after the server has been restarted or after the", "import debug,info,warn,error import socket from time import sleep,time from thread import allocate_lock class", "complete. sleep(self.readout_time) self.trigger_signal_type = \"Opto\" def trigger(self): \"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\") while \"busy\"", "is changed. If the backgroud image does not have the the same number", "\"acquire\" field is \"executing\" if not self.connected: return True return ((self.state_code() & 0x00000020)", "sleep(0.05) if self.bulb_mode == 0 and not self.ignore_first_trigger: self.trigger() # Wait for the", "and 8=busy. writefile,<filename>,1 - Save the last read image, no reply set_bin,8,8 -", "# correction will fail. 
if self.auto_bkg: self.update_bkg() self.write(\"start\") if not wait: return while", "as formatted ASCCI text, precise to 1 ms\"\"\" from datetime import datetime timestamp", "error(\"Cannot create of %r to %r: %s\" % (pathname,link,msg)) if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i]))", "series of images timed by an exteranal hardware trigger signal filename_base: Directory part", "[\"dezinger queued\"] if (status & 0x00200000) != 0: t+= [\"dezingering\"] if (status &", "# falling edge initiates frame transfer/readout self.bulb_mode = 0 # Keep track of", "for the filename sequence number, e.g. 6 for 'test000001.rx'\"\"\" # Make sure the", "logging is enabled. Change when problem solved. logging = False @property def readout_time(self):", "Keep track of when the detector was last read. self.last_read = 0.0 #", "makedirs(directory) filename_base = remote(filename_base) # If already in sequence aquistion mode, cancel it.", "by stopping the continuous clearing. In case the CCD readout is in progess,", "% (pathname,link,msg)) if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location of image files", "hardware trigger signal filename_base: Directory part must be valid pathname on file system", "0. \"\"\" try: return int(self.query(\"get_size_bkg\").split(\",\")[0]) except: return 0 def update_bkg(self): \"\"\"Updates the backgound", "a series of symbilic link complying to the # naming scheme imposed by", "or \"executing\" if (status & 0x00000300) != 0: self.last_read = time() return status", "sleep(0.05) self.last_read = time() def image_size(self): \"\"\"Width and height of the image in", "e.g. 
\"2048,2048\" Reference: Rayonix HS detector manual 0.3e Chapter 9: The Legacy Remote", "Work-around for a bug where the detector remaingns in \"reading\" state # forever.", "def get_error_logfile(self): \"\"\"File name error messages.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile", "Wait for the readout of the previous image to finish. while self.is_reading(): sleep(0.05)", "# E.g. user id 10660(xppopr) on \"xpp-daq\", versus user id 500(hsuser) # on", "while \"busy\" in self.state(): sleep(0.05) def get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup', 'CMOS Pulldown", "= time() # Wait for the readout of the previous image to finish.", "which has only 0=idle and 8=busy. writefile,<filename>,1 - Save the last read image,", "new # bin factor is read back. t = time() while self.get_bin_factor() !=", "if there is not valid backgournd image. self.auto_bkg = True # Whether to", "after starting a series. self.trigger_signal_type = \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width", "for the current bin mode, which is substracted from every image after readout", "= property(get_error_logfile) def get_logfile(self): \"\"\"File name for transcript if verbose logging is enabled.\"\"\"", "import socket from time import sleep,time from thread import allocate_lock class Rayonix_Detector(object): \"\"\"This", "masks out error flags if (status & ~0x0444444F) == 0: return \"idle\" t", "following commands: start - Puts the CCD to integration mode, no reply readout,0,filename", "0: t+= [\"correct error\"] if (status & 0x00010000) != 0: t+= [\"write queued\"]", "% (pathname,details)) if not exists(pathname): umask(0000) try: makedirs(pathname) except Exception,details: stderr.write(\"makedirs: %r: %r\"", "bin factor is changed. 
If the background image does not have the
\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves the", "symlink(pathname,link) except Exception,msg: error(\"Cannot create of %r to %r: %s\" % (pathname,link,msg)) if", "the beamline control computer, so directories created via NFS on the # control", "date and time as formatted ASCCI text, precise to 1 ms\"\"\" from datetime", "parameter (\"Server Arguments\" or \"Personal Name\") set to \"2222\". Or, alternatively from the", "method multi-thread safe. self.lock = allocate_lock() # If this flag is set 'start'", "# Wait for the first (suppressed) image readout to complete. sleep(self.readout_time) self.trigger_signal_type =", "time in seconds. Changes with 'bin_factor'.\"\"\" safetyFactor = 1 from numpy import nan", "\"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read = time() def readout_raw(self): \"Reads the detector out without", "(not reading, not clearing)\" # \"acquire\" field is \"executing\" if not self.connected: return", "the MAR CCD detector Using remote protocol version 1\"\"\" name = \"rayonix_detector\" from", "the image # correction will fail. if self.auto_bkg: self.update_bkg() self.write(\"start\") if not wait:", "if len(parts) >= 4: server = parts[2] ; share = parts[3] path =", "for debugging filenames = [\"/tmp/test_%03d.mccd\" % (i+1) for i in range(0,10)] print('rayonix_detector.ip_address =", "read_bkg(self): \"\"\"Reads a fresh the backgound image, which is substracted from every image", "control machine might not be writable on the Rayonix computer. # E.g. user", "the Rayonix computer filename_suffix: including the dot (.) 
number_field_width: number of digits for", "while \"busy\" in self.state(): sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0])", "(status & 0x00002000) != 0: t+= [\"correcting\"] if (status & 0x00004000) != 0:", "self.abort() # Make sure there is a valid background image. Otherwise, the image", "image does not have the the same number of pixels as the last", "[\"/tmp/test_%03d.mccd\" % (i+1) for i in range(0,10)] print('rayonix_detector.ip_address = %r' % rayonix_detector.ip_address) print('')", "auto-generated filenames instead. # As a work-araound generated a series of symbilic link", "two integer numbers, e.g. \"2,2\" get_size_bkg - reply is the number of pixels", "# does not work with protocol v1 (timeout) \"\"\"Width and height of the", "MarCCD software from the Remote Control control panel with the second parameter (\"Server", "if (status & ~0x0444444F) == 0: return \"idle\" t = [] if (status", "(default: false), the image raw data is saved rather than the correct image.", "if not pathname.endswith(end): pathname += end return pathname def makedirs(pathname): \"\"\"Create a directory,", "LCLS mode frame_trigger_type = 2 if self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while", "% (i+1) if islink(link) or exists(link): remove(link) try: pathname = relpath(filenames[i],tempdir) except Exception,msg:", "== n: return if not self.state() == \"idle\": self.abort() while self.state() != \"idle\":", "\"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a bin factor", "makedirs(directory) def log_error(self,message): \"\"\"For error messages. 
Display the message and append it to", "0.2 s delay until te detectror enters \"integrating\" state, (maybe for the clearing", "point-spread function of the fiber optic taper is large compared to the pixel", "(0.05) def abort(self): \"\"\"Cancel series acquiation mode\"\"\" self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads the detector.", "# 0 = not triggered, 1= triggered frame transfer, 2 = bulb mode,", "pathname of a file on a network file server from the local format", "pathname = filenames[0] for i in range(0,level): pathname = dirname(pathname) return pathname rayonix_detector", "[\"acquiring series\"] if (status & 0x04000000) != 0: t+= [\"series error\"] state =", "is saved as a file. The image file is written in background as", "t+= [\"acquiring series\"] if (status & 0x04000000) != 0: t+= [\"series error\"] state", "the point-spread function of the fiber optic taper is large compared to the", "readout_time(self): \"\"\"Estimated readout time in seconds. Changes with 'bin_factor'.\"\"\" safetyFactor = 1 from", "protocol v1 (timeout) \"\"\"Width and height of the current background image in pixels.", "an external hardware trigger signal. filenames: list of absolute pathnames. 
Directory part must", "8: 75, 10: 120} bin_factor = self.bin_factor if bin_factor in readout_rate: read_time =", "read bits 12-15: correct bits 16-19: write bits 20-23: dezinger Each filed contains", "running # under a different user id on the Rayonix control computer, compared", "0: t+= [\"acquiring series\"] if (status & 0x04000000) != 0: t+= [\"series error\"]", "self.bin_factor == n: return if not self.state() == \"idle\": self.abort() while self.state() !=", "the second parameter (\"Server command\" or \"Device Database Server\") set to \"/home/marccdsource/servers/marccd_server_socket\", and", "\"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end): pathname += end return pathname def makedirs(pathname): \"\"\"Create a", "If this flag is set 'start' automatically reads a background image # if", "!= 0: t+= [\"integrate error\"] if (status & 0x00000100) != 0: t+= [\"read", "After a bin factor change it takes about 2 s before the new", "6 for 'test000001.rx'\"\"\" # Make sure the directory to write the image to", "sleep(0.05) # The \"start_series_triggered\" command does not allow a list of filenames #", "\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves the last read image without spatial and", "not work with protocol v1 (timeout) \"\"\"Width and height of the current background", "def remote(pathname): \"\"\"This converts the pathname of a file on a network file", "6 4-bit fields bits 0-3: state: 0=idle,8=busy bits 4-7: acquire bits 8-11: read", "important to know if the bin factor is changed. 
If the background image
writefile,<filename>,1", "dirname(filename) if directory == \"\": return makedirs(directory) def log_error(self,message): \"\"\"For error messages. Display", "operation. The function returns immediately. The pathname of the file is interpreted in", "3: sleep (0.1) bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout X and Y bin factor\") def", "last readout is finished. This also acquires a background image, in case there", "> 2.0: self.abort() # Make sure there is a valid background image. Otherwise,", "access,W_OK return access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames: list of strings\"\"\" from os.path import dirname", "rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is started from the MarCCD software from the Remote Control", "parts[3] path = \"\" for part in parts[4:]: path += part+\"/\" path =", "self.state() != \"idle\": sleep(0.05) # Need a valid background image before starting acquisition.", "backgroud image does not have the the same number of pixels as the", "bit 8 and 9 of the state code tell whether the task status", "directories created via NFS on the # control machine might not be writable", "= timestamp() stderr.write(\"%s: %s: %s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message)) self.log(message) def", "that generates a reply. 
Return the reply\"\"\" self.log(\"query %r\" % command) from tcp_client", "The \"start_series_triggered\" command does not allow a list of filenames # to be", "the detector\"\"\" self.write(\"trigger,0.001\") while \"busy\" in self.state(): sleep(0.05) def get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS", "t+= [\"write error\"] if (status & 0x00100000) != 0: t+= [\"dezinger queued\"] if", "return dirname(filenames[0]) for level in range(1,4): dirnames = [] for pathname in filenames:", "0.3e Chapter 9: The Legacy Remote Mode for HS Detector Control Author: <NAME>", "dirnames += [pathname] if all([n == dirnames[0] for n in dirnames]): break pathname", "not save to first image, which is a bad # image, when using", "!= 0: t+= [\"read queued\"] if (status & 0x00000200) != 0: t+= [\"reading\"]", "tell whether the task status of \"correct\" # is either \"queued\" or \"executing\"", "image to exists. from os.path import dirname directory = dirname(filename_base) makedirs(directory) filename_base =", "/tmp/rayonix_detector.log self.verbose_logging = True @property def connected(self): from tcp_client import connected return connected(self.ip_address)", "the Rayonix control computer, compared # to the beamline control computer, so directories", "while self.is_reading(): sleep(0.05) # Work-around for a bug where the detector remaingns in", "the transcript, if verbose logging is enabled.\"\"\" if not self.verbose_logging: return if len(message)", "in subclass from logging import debug,info,warn,error import socket from time import sleep,time from", "& 0x00000020) != 0) def is_reading (self): \"tells whether the chip is currently", "bits 20-23: dezinger Each filed contains a 4-bit code, with the following meaning:", "self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while not self.is_reading(): sleep(0.05) self.last_read = time() def readout_and_save_raw(self,filename): \"\"\"Reads", "image, e.g. 
\"2048,2048\" Reference: Rayonix HS detector manual 0.3e Chapter 9: The Legacy", "a UNC name. try: import win32wnet # Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". pathname =", "\"executing\" return ((self.state_code() & 0x00003000) != 0) def state(self): \"\"\"Status information as string:", "filenames. When the rayonix softawre tries to save # an image the symblix", "factor change it takes about 2 s before the new # bin factor", "to save # an image the symblix link redirects is to create an", "timed by an exteranal hardware trigger signal filename_base: Directory part must be valid", "edge of the trigger initiates frame transfer/readout # 1: rising edge starts acquisition,", "to 1 ms\"\"\" from datetime import datetime timestamp = str(datetime.now()) return timestamp[:-3] #", "rising edge of the trigger initiates frame transfer/readout # 1: rising edge starts", "and 9 of the state code tell whether the task status of \"correct\"", "# Convert separators from DOS style to UNIX style. pathname = pathname.replace(\"\\\\\",\"/\") if", "so directories created via NFS on the # control machine might not be", "def get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0]) except: return def set_bin_factor(self,n): if self.bin_factor == n:", "Rayonix software running # under a different user id on the Rayonix control", "from every image after readout before the correction is applied. \"\"\" if not", "0: t+= [\"writing\"] if (status & 0x00040000) != 0: t+= [\"write error\"] if", "returns immediately. The pathname of the file is interpreted in file system of", "factor\") def read_bkg(self): \"\"\"Reads a fresh the backgound image, which is substracted from", "of the current background image in pixels. 
This value is important to know", "beamline control computer, so directories created via NFS on the # control machine", "bin factor\") def read_bkg(self): \"\"\"Reads a fresh the backgound image, which is substracted", "not self.bkg_valid(): self.read_bkg() def bkg_valid(self): \"\"\"Does detector software have a the backgound image", "if (status & 0x00000100) != 0: t+= [\"read queued\"] if (status & 0x00000200)", "reply == \"\": return 0 try: status = int(eval(reply)) except Exception,message: self.log_error(\"command 'get_state'", "ot the real filenames. When the rayonix softawre tries to save # an", "error\"] if (status & 0x00000100) != 0: t+= [\"read queued\"] if (status &", "the last read image to a file. The pathname of the file is", "\"\"\"Width and height of the current background image in pixels. This value is", "file is interpreted in file system of the server, not locally. If 'save_raw'", "self.save_raw: if filename != None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else: if filename != None:", "get_trigger_signal_type(self): \"\"\"'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup', 'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def", "# Work-around for a bug where the detector remaingns in \"reading\" state #", "%s: %s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message)) self.log(message) def log(self,message): \"\"\"For non-critical", "= {1: 2, 2: 10, 3: 15, 4: 25, 5: 40, 6: 60,", "immediately. 
The pathname of the file is interpreted in file system of the", "sleep (0.05) def abort(self): \"\"\"Cancel series acquiation mode\"\"\" self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads the", "the pathname of a file on a network file server from the local", "exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location of image files for other applications from", "the MAR CCD compter. e.g. \"//id14bxf/data\" in Windows maps to \"/net/id14bxf/data\" on Unix\"\"\"", "to exists. from os.path import dirname directory = dirname(filename_base) makedirs(directory) filename_base = remote(filename_base)", "reply = self.query(\"get_state\").strip(\"\\n\\0\") if reply == \"\": return 0 try: status = int(eval(reply))", "\"J:\\anfinrud_0811\\Data\". pathname = pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname) except: pass # Convert separators from", "not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # The \"start_series_triggered\"", "= \"/\" if pathname.endswith(\"/\") else \"\" # Try to expand a Windows drive", "in readout_rate: read_time = 1.0/readout_rate[bin_factor] else: read_time = nan return read_time*safetyFactor def make_directory(self,filename):", "if len(filenames) == 0: return [] if len(filenames) == 1: return dirname(filenames[0]) for", "from sys import stderr if len(message) == 0 or message[-1] != \"\\n\": message", "self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value) while \"busy\" in self.state(): sleep(0.05) trigger_signal_type =", "last read image the correction as saving to file will fail. At startup,", "Otherwise, the image # correction will fail. if self.auto_bkg: self.update_bkg() self.write(\"start\") if not", "locally. 
If 'save_raw' is true (default: false), the image raw data is saved", "the current bin mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0]) except: return 0 def filesize(self,bin_factor): \"\"\"Image", "Reads out the detector, corrects the image and saves it to a file", "if not exists(pathname): umask(0000) try: makedirs(pathname) except Exception,details: stderr.write(\"makedirs: %r: %r\" % (pathname,details))", "def start(self,wait=True): \"\"\"Puts the detector into integration mode by stopping the continuous clearing.", "to the pixel size) get_bin - reply is two integer numbers, e.g. \"2,2\"", "is substracted from every image after readout before the correction is applied.\"\"\" return", "stderr if exists(pathname) and not iswritable(pathname): try: chmod(pathname,0777) except Exception,details: stderr.write(\"chmod: %r: %r\"", "= 1.0/readout_rate[bin_factor] else: read_time = nan return read_time*safetyFactor def make_directory(self,filename): \"\"\"Make sure that", "generated a series of symbilic link complying to the # naming scheme imposed", "from DOS style to UNIX style. pathname = pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") == 0:", "detector was last read. self.last_read = 0.0 # Verbose logging: record every command", "not iswritable(pathname): try: chmod(pathname,0777) except Exception,details: stderr.write(\"chmod: %r: %r\" % (pathname,details)) if not", "== 1: return dirname(filenames[0]) for level in range(1,4): dirnames = [] for pathname", "under a different user id on the Rayonix control computer, compared # to", "name is not None: self.name = name self.timeout = 1.0 # This is", "initiates frame transfer/readout self.bulb_mode = 0 # Keep track of when the detector", "self.image_size() # By default verbose logging is enabled. Change when problem solved. logging", "format to the format used on the MAR CCD compter. e.g. 
\"//id14bxf/data\" in", "Inerted','CMOS Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value) while \"busy\" in", "message[-1] != \"\\n\": message += \"\\n\" t = timestamp() file(self.logfile,\"a\").write(\"%s: %s\" % (t,message))", "return True else: return False def is_integrating (self): \"tells whether the chip is", "server has been restarted or after the bin factor has been changed. \"\"\"", "directory = dirname(filename) if directory == \"\": return makedirs(directory) def log_error(self,message): \"\"\"For error", "if self.bulb_mode == 0 and not self.ignore_first_trigger: self.trigger() # Wait for the first", "Exception,message: self.log_error(\"command 'get_state' generated bad reply %r: %s\" % (reply,message)) return 0 #", "\"\" # Try to expand a Windows drive letter to a UNC name.", "(status & 0x0000000F) == 6: t+= [\"unavailable\"] if (status & 0x0000000F) == 7:", "& 0x00100000) != 0: t+= [\"dezinger queued\"] if (status & 0x00200000) != 0:", "import allocate_lock class Rayonix_Detector(object): \"\"\"This is to remote control the MAR CCD detector", "t+= [\"busy\"] if (status & 0x00000010) != 0: t+= [\"integrate queued\"] if (status", "= property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0]) except: return def set_bin_factor(self,n): if self.bin_factor", "= dirname(pathname) dirnames += [pathname] if all([n == dirnames[0] for n in dirnames]):", "Rayonix computer filename_suffix: including the dot (.) number_field_width: number of digits for the", "pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end): pathname += end return pathname def makedirs(pathname):", "filename_suffix: including the dot (.) number_field_width: number of digits for the filename sequence", "set to \"2222\". 
Or, alternatively from the command line by the commaand \"hsserver_lagacy\".", "!= 0: t+= [\"acquiring series\"] if (status & 0x04000000) != 0: t+= [\"series", "software have a the backgound image for the current bin mode, which is", "15, 4: 25, 5: 40, 6: 60, 8: 75, 10: 120} bin_factor =", "return pathname def makedirs(pathname): \"\"\"Create a directory, or make sure that the directory", "that the directory is world-writable\"\"\" # This is a workaround for promblem caused", "pathname.endswith(end): pathname += end return pathname def makedirs(pathname): \"\"\"Create a directory, or make", "state # forever. <NAME> 27 Mar 2014 ##if time()-t0 > 2.0: self.abort() #", "following meaning: 0=idle, 1=queued, 2=executing, 4=error The exception is the 'state' field, which", "requires 11 trigger pulses # to aquire 10 images. # Workaround: Software-trigger the", "self.verbose_logging = True @property def connected(self): from tcp_client import connected return connected(self.ip_address) online", "or after the bin factor has been changed. \"\"\" if not self.bkg_valid(): self.read_bkg()", "is important to know if the bin factor is changed. If the backgroud", "return self.bkg_image_size() == self.image_size() # By default verbose logging is enabled. Change when", "valid background image before starting acquisition. if self.auto_bkg: self.update_bkg() if self.bulb_mode == 0", "system of the server, not locally. 
\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire a", "a list of filenames # to be specified, but uses auto-generated filenames instead.", "queued\"] if (status & 0x02000000) != 0: t+= [\"acquiring series\"] if (status &", "read_time = 1.0/readout_rate[bin_factor] else: read_time = nan return read_time*safetyFactor def make_directory(self,filename): \"\"\"Make sure", "status = int(eval(reply)) except Exception,message: self.log_error(\"command 'get_state' generated bad reply %r: %s\" %", "and append it to the error log file. If verbose logging is enabled,", "ms\"\"\" from datetime import datetime timestamp = str(datetime.now()) return timestamp[:-3] # omit microsconds", "'start' automatically reads a background image # if there is not valid backgournd", "complying to the # naming scheme imposed by the 'start_series_triggered' command that #", "1.0/readout_rate[bin_factor] else: read_time = nan return read_time*safetyFactor def make_directory(self,filename): \"\"\"Make sure that the", "file. The image file is written in background as a pipelined operation. The", "there is not valid backgournd image. self.auto_bkg = True # Whether to save", "# If this flag is set 'start' automatically reads a background image #", "link redirects is to create an image with # the specified name. from", "& 0x00000040) != 0: t+= [\"integrate error\"] if (status & 0x00000100) != 0:", "the rayonix softawre tries to save # an image the symblix link redirects", "field is \"executing\" if not self.connected: return True return ((self.state_code() & 0x00000020) !=", "- Reads out the detector, corrects the image and saves it to a", "a series of images timed by an external hardware trigger signal. 
filenames: list", "timestamp() stderr.write(\"%s: %s: %s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message)) self.log(message) def log(self,message):", "return True return ((self.state_code() & 0x00000020) != 0) def is_reading (self): \"tells whether", "pass # Convert separators from DOS style to UNIX style. pathname = pathname.replace(\"\\\\\",\"/\")", "sample remote control server program \"marccd_server_socket\" with TCP port number 2222. Usage example:", "(pathname,details)) def iswritable(pathname): \"\"\"Is file or folder writable?\"\"\" from os import access,W_OK return", "def bkg_valid(self): \"\"\"Does detector software have a the backgound image for the current", "by an exteranal hardware trigger signal filename_base: Directory part must be valid pathname", "generate a reply\"\"\" from tcp_client import write write(self.ip_address,command) def query(self,command): \"\"\"Send a command", "<NAME> 27 Mar 2014 ##if time()-t0 > 2.0: self.abort() # Make sure there", "to file will fail. At startup, the background image is empty and this", "reads a background image # if there is not valid backgournd image. self.auto_bkg", "pass makedirs(tempdir) for i in range(0,len(filenames)): link = tempdir+\"/%06d.rx\" % (i+1) if islink(link)", "image after readout before the correction is applied. 
\"\"\" if not self.is_idle(): self.abort()", "world-writable\"\"\" # This is a workaround for promblem caused by the Rayonix software", "& 0x0000000F) == 6: t+= [\"unavailable\"] if (status & 0x0000000F) == 7: t+=", "is set 'start' automatically reads a background image # if there is not", "exists(link): remove(link) try: pathname = relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative path of %r with", "microsconds def remote(pathname): \"\"\"This converts the pathname of a file on a network", "backgound image for the current bin mode, which is substracted from every image", "and saves it to a file no reply readout,1 - reads a new", "t+= [\"writing\"] if (status & 0x00040000) != 0: t+= [\"write error\"] if (status", "expand a Windows drive letter to a UNC name. try: import win32wnet #", "time as formatted ASCCI text, precise to 1 ms\"\"\" from datetime import datetime", "self.abort() while self.state() != \"idle\": sleep(0.05) # Need a valid background image before", "self.update_bkg() if self.bulb_mode == 0 and not self.ignore_first_trigger: # The detector software does", "make sure that the directory is world-writable\"\"\" # This is a workaround for", "of the previous image to finish. while self.is_reading(): sleep(0.05) # Work-around for a", "integer number containing 6 4-bit fields bits 0-3: state: 0=idle,8=busy bits 4-7: acquire", "self.save_raw = False # For triggred image acquiation # 0: the rising edge", "readout to complete. 
sleep(self.readout_time) self.trigger_signal_type = \"Opto\" def trigger(self): \"\"\"Software-trigger the detector\"\"\" self.write(\"trigger,0.001\")", "remote control the MAR CCD detector Using remote protocol version 1\"\"\" name =", "dirname if len(filenames) == 0: return [] if len(filenames) == 1: return dirname(filenames[0])", "2 = bulb mode, 3 = LCLS mode frame_trigger_type = 2 if self.bulb_mode", "bit mask 0x00444440 masks out error flags if (status & ~0x0444444F) == 0:", "self.write(\"set_bin,\"+str(n)+\",\"+str(n)) # After a bin factor change it takes about 2 s before", "import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self = rayonix_detector # for debugging filenames = [\"/tmp/test_%03d.mccd\"", "bkg_valid(self): \"\"\"Does detector software have a the backgound image for the current bin", "# to aquire 10 images. # Workaround: Software-trigger the detector once after starting", "other applications from DB import dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series", "server, not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire a series of images", "& 0x00000300) != 0: self.last_read = time() return status def is_idle (self): try:", "filename == \"\": return from os.path import dirname directory = dirname(filename) if directory", "mode. 
However, (as of # Jul 2014, version 0.3.10), the detector still requires", "this flag is set 'start' automatically reads a background image # if there", "t+= [\"correct error\"] if (status & 0x00010000) != 0: t+= [\"write queued\"] if", "0x00100000) != 0: t+= [\"dezinger queued\"] if (status & 0x00200000) != 0: t+=", "falling edge initiates frame transfer/readout self.bulb_mode = 0 # Keep track of when", "\"4.0.1\" # default name \"rayonix_detector\" may be overridden in subclass from logging import", "\"reading\" state # forever. <NAME> 27 Mar 2014 ##if time()-t0 > 2.0: self.abort()", "(status & 0x00000400) != 0: t+= [\"read error\"] if (status & 0x00001000) !=", "saved rather than the correct image. \"\"\" if filename != None: self.make_directory(filename) if", "aquire 10 images. # Workaround: Software-trigger the detector once after starting a series.", "10660(xppopr) on \"xpp-daq\", versus user id 500(hsuser) # on \"con-ics-xpp-rayonix\" from os import", "on the # control machine might not be writable on the Rayonix computer.", "wait: return while not self.is_integrating() and self.connected: sleep (0.05) def abort(self): \"\"\"Cancel series", "if (status & 0x00020000) != 0: t+= [\"writing\"] if (status & 0x00040000) !=", "logging import debug,info,warn,error import socket from time import sleep,time from thread import allocate_lock", "self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location of image files for other applications from DB import", "background image, e.g. 
\"2048,2048\" Reference: Rayonix HS detector manual 0.3e Chapter 9: The", "timestamp = str(datetime.now()) return timestamp[:-3] # omit microsconds def remote(pathname): \"\"\"This converts the", "# \"acquire\" field is \"executing\" if not self.connected: return True return ((self.state_code() &", "from the MarCCD software from the Remote Control control panel with the second", "if (status & 0x00100000) != 0: t+= [\"dezinger queued\"] if (status & 0x00200000)", "\"\"\"This converts the pathname of a file on a network file server from", "& 0x01000000) != 0: t+= [\"series queued\"] if (status & 0x02000000) != 0:", "in dirnames]): break pathname = filenames[0] for i in range(0,level): pathname = dirname(pathname)", "is_correcting (self): \"tells whether the chip is currently being read out\" # bit", "%s\" % (t,message)) self.log(message) def log(self,message): \"\"\"For non-critical messages. Append the message to", "written in background as a pipelined operation. The function returns immediately. The pathname", "\"start_series_triggered\" command does not allow a list of filenames # to be specified,", "the pixel size) get_bin - reply is two integer numbers, e.g. \"2,2\" get_size_bkg", "reply\"\"\" from tcp_client import write write(self.ip_address,command) def query(self,command): \"\"\"Send a command that generates", "= time() def save_image(self,filename): \"\"\"Saves the last read image to a file. The", "os.path import dirname,relpath,islink,exists from os import symlink,remove from shutil import rmtree directory =", "the backgroud image does not have the the same number of pixels as", "pathname = dirname(pathname) dirnames += [pathname] if all([n == dirnames[0] for n in", "of images timed by an external hardware trigger signal. 
filenames: list of absolute", "if (status & 0x00200000) != 0: t+= [\"dezingering\"] if (status & 0x00400000) !=", "trigger initiates frame transfer/readout # 1: rising edge starts acquisition, # falling edge", "return state def start(self,wait=True): \"\"\"Puts the detector into integration mode by stopping the", "transcript.\"\"\" from sys import stderr if len(message) == 0 or message[-1] != \"\\n\":", "read the CCD and stores the result as background while not self.is_idle(): sleep(0.05)", "True else: return False def is_integrating (self): \"tells whether the chip is integrating", "software from the Remote Control control panel with the second parameter (\"Server command\"", "in range(0,len(filenames)): link = tempdir+\"/%06d.rx\" % (i+1) if islink(link) or exists(link): remove(link) try:", "Make sure there is a valid background image. Otherwise, the image # correction", "connected return connected(self.ip_address) online = connected def write(self,command): \"\"\"Sends a comman that does", "= [] if (status & 0x0000000F) == 6: t+= [\"unavailable\"] if (status &", "0x00040000) != 0: t+= [\"write error\"] if (status & 0x00100000) != 0: t+=", "'save_raw' is true (default: false), the image raw data is saved rather than", "self.bkg_image_size() == self.image_size() # By default verbose logging is enabled. Change when problem", "correction is applied. \"\"\" if not self.is_idle(): self.abort() while not self.is_idle(): sleep(0.05) self.write(\"readout,1\")", "error messages. Display the message and append it to the error log file.", "flag is set 'start' automatically reads a background image # if there is", "# 1: rising edge starts acquisition, # falling edge initiates frame transfer/readout self.bulb_mode", "tcp_client import query return query(self.ip_address,command) def state_code(self): \"\"\"Status information as integer\"\"\" reply =", "self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # Need a valid", "to a UNC name. 
try: import win32wnet # Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". pathname", "is also added to the transcript.\"\"\" from sys import stderr if len(message) ==", "bug where the detector remaingns in \"reading\" state # forever. <NAME> 27 Mar", "pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname) except: pass # Convert separators from DOS style to", "in filenames: for i in range(0,level): pathname = dirname(pathname) dirnames += [pathname] if", "shutil import rmtree directory = common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\" try: rmtree(tempdir) except: pass", "control of the MAR CCD detector, using <NAME>'s sample remote control server program", "used for IP address, in case there is more than one detector\"\"\" if", "image. self.auto_bkg = True # Whether to save corrected or raw images. self.save_raw", "different user id on the Rayonix control computer, compared # to the beamline", "is the 'state' field, which has only 0=idle and 8=busy. writefile,<filename>,1 - Save", "self.lock = allocate_lock() # If this flag is set 'start' automatically reads a", "to first image, which is a bad # image, when using triggered frame", "error\"] if (status & 0x00010000) != 0: t+= [\"write queued\"] if (status &", "already in sequence aquistion mode, cancel it. if not self.state() == \"idle\": self.abort()", "caused by the Rayonix software running # under a different user id on", "series acquiation mode\"\"\" self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads the detector. If a filename is", "maps to \"/net/id14bxf/data\" on Unix\"\"\" if not pathname: return pathname end = \"/\"", "+= \"\\n\" t = timestamp() file(self.logfile,\"a\").write(\"%s: %s\" % (t,message)) def get_error_logfile(self): \"\"\"File name", "and saves the uncorrected image as a file. The image file is written", "to make the query method multi-thread safe. 
self.lock = allocate_lock() # If this", "work-araound generated a series of symbilic link complying to the # naming scheme", "the directory of teh given filename exists by create it, if necessary.\"\"\" if", "make the query method multi-thread safe. self.lock = allocate_lock() # If this flag", "sure there is a valid background image. Otherwise, the image # correction will", "= \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, # interval_time=0,frame_trigger_type,series_trigger_type=0, # filename_base,filename_suffix,number_field_width # 0 = not triggered,", "bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout X and Y bin factor\") def read_bkg(self): \"\"\"Reads a", "& 0x00040000) != 0: t+= [\"write error\"] if (status & 0x00100000) != 0:", "raw images. self.save_raw = False # For triggred image acquiation # 0: the", "\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def acquire_images_triggered(self,filenames): \"\"\"Acquire a series of images timed by an", "image_nbytes = 2*image_size**2 filesize = headersize+image_nbytes return filesize def bkg_image_size(self): # does not", "sequence number, e.g. 6 for 'test000001.rx'\"\"\" # Make sure the directory to write", "computer. # E.g. 
user id 10660(xppopr) on \"xpp-daq\", versus user id 500(hsuser) #", "detector, using <NAME>'s sample remote control server program \"marccd_server_socket\" with TCP port number", "\"\"\" ##t0 = time() # Wait for the readout of the previous image", "tries to save # an image the symblix link redirects is to create", "pixels is not used, because the point-spread function of the fiber optic taper", "return False def is_integrating (self): \"tells whether the chip is integrating mode (not", "image after readout before the correction is applied.\"\"\" return self.bkg_image_size() == self.image_size() #", "40, 6: 60, 8: 75, 10: 120} bin_factor = self.bin_factor if bin_factor in", "\"\": return 0 try: status = int(eval(reply)) except Exception,message: self.log_error(\"command 'get_state' generated bad", "= property(get_logfile) def timestamp(): \"\"\"Current date and time as formatted ASCCI text, precise", "number 2222. Usage example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is started from the", "& 0x0000000F) == 8: t+= [\"busy\"] if (status & 0x00000010) != 0: t+=", "& 0x02000000) != 0: t+= [\"acquiring series\"] if (status & 0x04000000) != 0:", "return ((self.state_code() & 0x00000020) != 0) def is_reading (self): \"tells whether the chip", "\"marccd_server_socket\" with TCP port number 2222. 
Usage example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server", "bits 12-15: correct bits 16-19: write bits 20-23: dezinger Each filed contains a", "enabled.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile) def timestamp(): \"\"\"Current", "of the fiber optic taper is large compared to the pixel size) get_bin", "\"/home/marccdsource/servers/marccd_server_socket\", and the third parameter (\"Server Arguments\" or \"Personal Name\") set to \"2222\".", "Windows maps to \"/net/id14bxf/data\" on Unix\"\"\" if not pathname: return pathname end =", "import sleep,time from thread import allocate_lock class Rayonix_Detector(object): \"\"\"This is to remote control", "does not allow a list of filenames # to be specified, but uses", "Append the message to the transcript, if verbose logging is enabled.\"\"\" if not", "\"\"\"Saves the last read image without spatial and uniformity correction to a file.", "list of absolute pathnames. Directory part must be valid pathname on file system", "message += \"\\n\" t = timestamp() stderr.write(\"%s: %s: %s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\"", "the # control machine might not be writable on the Rayonix computer. #", "to the error log file. If verbose logging is enabled, it is also", "connected def write(self,command): \"\"\"Sends a comman that does not generate a reply\"\"\" from", "image if needed, for instance after the server has been restarted or after", "False @property def readout_time(self): \"\"\"Estimated readout time in seconds. 
Changes with 'bin_factor'.\"\"\" safetyFactor", "pathname on file system of the Rayonix computer filename_suffix: including the dot (.)", "2014, version 0.3.10), the detector still requires 11 trigger pulses # to aquire", "dirname(filename_base) makedirs(directory) filename_base = remote(filename_base) # If already in sequence aquistion mode, cancel", "also acquires a background image, in case there is no valid background image", "self.get_bin_factor() != n and time()-t < 3: sleep (0.1) bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout", "if not self.bkg_valid(): self.read_bkg() def bkg_valid(self): \"\"\"Does detector software have a the backgound", "os import makedirs,umask,chmod from os.path import exists from sys import stderr if exists(pathname)", "Use full readout mode (2048x2048 pixels), no reply (The 1x1 bin mode with", "be valid pathname on file system of the Rayonix computer filename_suffix: including the", "stderr if len(message) == 0 or message[-1] != \"\\n\": message += \"\\n\" t", "every image after readout before the correction is applied. \"\"\" if not self.is_idle():", "Exception,details: stderr.write(\"chmod: %r: %r\" % (pathname,details)) if not exists(pathname): umask(0000) try: makedirs(pathname) except", "a filename is given, the image is saved as a file. The image", "1 from numpy import nan # Readout rate in frames per second as", "or message[-1] != \"\\n\": message += \"\\n\" t = timestamp() stderr.write(\"%s: %s: %s\"", "in pixels. This value is important to know if the bin factor is", "log(self,message): \"\"\"For non-critical messages. 
Append the message to the transcript, if verbose logging", "part+\"/\" path = path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end): pathname += end", "dirnames = [] for pathname in filenames: for i in range(0,level): pathname =", "# an image the symblix link redirects is to create an image with", "correcting and displaying the image.\" self.write(\"readout,3\") self.last_read = time() def save_image(self,filename): \"\"\"Saves the", "return ((self.state_code() & 0x00000300) != 0) def is_correcting (self): \"tells whether the chip", "0 def filesize(self,bin_factor): \"\"\"Image file size in bytes including headers bin_facor: 2,4,8,16\"\"\" image_size", "t+= [\"reading\"] if (status & 0x00000400) != 0: t+= [\"read error\"] if (status", "return 0 def filesize(self,bin_factor): \"\"\"Image file size in bytes including headers bin_facor: 2,4,8,16\"\"\"", "problem solved. logging = False @property def readout_time(self): \"\"\"Estimated readout time in seconds.", "factor: readout_rate = {1: 2, 2: 10, 3: 15, 4: 25, 5: 40,", "# Need a valid background image before starting acquisition. if self.auto_bkg: self.update_bkg() if", "\"\\n\": message += \"\\n\" t = timestamp() stderr.write(\"%s: %s: %s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s:", "image raw data is saved rather than the correct image. \"\"\" if filename", "is_idle (self): try: status = self.state_code() except: return True # bit mask 0x00444440", "as saving to file will fail. At startup, the background image is empty", "2014 ##if time()-t0 > 2.0: self.abort() # Make sure there is a valid", "directory of teh given filename exists by create it, if necessary.\"\"\" if filename", "clearing)\" # \"acquire\" field is \"executing\" if not self.connected: return True return ((self.state_code()", "to write the image to exists. 
from os.path import dirname directory = dirname(filename_base)", "os import access,W_OK return access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames: list of strings\"\"\" from os.path", "& 0x00000400) != 0: t+= [\"read error\"] if (status & 0x00001000) != 0:", "512x512-pixel bin mode, no reply set_bin,2,2 - Use full readout mode (2048x2048 pixels),", "is interpreted in file system of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\")", "time()-t < 3: sleep (0.1) bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout X and Y bin", "is to remote control the MAR CCD detector Using remote protocol version 1\"\"\"", "def save_raw_image(self,filename): \"\"\"Saves the last read image without spatial and uniformity correction to", "& ~0x0444444F) == 0: return \"idle\" t = [] if (status & 0x0000000F)", "= rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is started from the MarCCD software from the Remote", "is either \"queued\" or \"executing\" return ((self.state_code() & 0x00000300) != 0) def is_correcting", "system of the Rayonix computer\"\"\" # The detector will ignore an \"acquire_images_triggered\" command", "== 8: t+= [\"busy\"] if (status & 0x00000010) != 0: t+= [\"integrate queued\"]", "(.) number_field_width: number of digits for the filename sequence number, e.g. 6 for", "self.auto_bkg: self.update_bkg() self.write(\"start\") if not wait: return while not self.is_integrating() and self.connected: sleep", "is saved rather than the correct image. \"\"\" if filename != None: self.make_directory(filename)", "e.g. 6 for 'test000001.rx'\"\"\" # Make sure the directory to write the image", "bin mode\"\"\" try: return int(self.query(\"get_size\").split(\",\")[0]) except: return 0 def filesize(self,bin_factor): \"\"\"Image file size", "If 'save_raw' is true (default: false), the image raw data is saved rather", "is enabled. Change when problem solved. 
logging = False @property def readout_time(self): \"\"\"Estimated", "applied. \"\"\" if not self.is_idle(): self.abort() while not self.is_idle(): sleep(0.05) self.write(\"readout,1\") # read", "user id 500(hsuser) # on \"con-ics-xpp-rayonix\" from os import makedirs,umask,chmod from os.path import", "ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\") The server is started from the MarCCD software from the", "import symlink,remove from shutil import rmtree directory = common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\" try:", "!= 0: t+= [\"integrate queued\"] if (status & 0x00000020) != 0: t+= [\"integrating\"]", "self.auto_bkg: self.update_bkg() if self.bulb_mode == 0 and not self.ignore_first_trigger: # The detector software", "symlink,remove from shutil import rmtree directory = common_topdir(filenames) tempdir = directory+\"/.rayonix_temp\" try: rmtree(tempdir)", "height of the image in pixels at the current bin mode\"\"\" try: return", "if not self.save_raw: if filename != None: self.write(\"readout,0,\"+remote(filename)) else: self.write(\"readout,0\") else: if filename", "is not used, because the point-spread function of the fiber optic taper is", "a series of images timed by an exteranal hardware trigger signal filename_base: Directory", "!= 0: t+= [\"dezinger error\"] if (status & 0x01000000) != 0: t+= [\"series", "promblem caused by the Rayonix software running # under a different user id", "into integration mode by stopping the continuous clearing. In case the CCD readout", "is two integer numbers, e.g. \"2,2\" get_size_bkg - reply is the number of", "if reply == \"\": return 0 try: status = int(eval(reply)) except Exception,message: self.log_error(\"command", "finished. 
This also acquires a background image, in case there is no valid", "(n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() != \"acquiring series\": sleep(0.05) if self.bulb_mode == 0 and not", "the detector once after starting a series. self.trigger_signal_type = \"Software\" # start_series,n_frames,first_frame_number=1,integration_time=0, #", "os.path import dirname if len(filenames) == 0: return [] if len(filenames) == 1:", "\"queued\" or \"executing\" return ((self.state_code() & 0x00000300) != 0) def is_correcting (self): \"tells", "\"executing\" if (status & 0x00000300) != 0: self.last_read = time() return status def", "corrects the image and saves it to a file no reply readout,1 -", "= time() def image_size(self): \"\"\"Width and height of the image in pixels at", "text, precise to 1 ms\"\"\" from datetime import datetime timestamp = str(datetime.now()) return", "clearing to stop?) When wait=False, do no wait for this to happen. \"\"\"", "except: return True # bit mask 0x00444440 masks out error flags if (status", "self.write(\"readout,3,\"+remote(filename)) self.last_read = time() def readout_raw(self): \"Reads the detector out without correcting and", "dirname directory = dirname(filename) if directory == \"\": return makedirs(directory) def log_error(self,message): \"\"\"For", "%s\" % (pathname,link,msg)) if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location of image", "is enabled, it is also added to the transcript.\"\"\" from sys import stderr", "locally. 
\"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves the last read image without spatial", "for testing from pdb import pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s: %(message)s\") self = rayonix_detector", "== 0: return \"idle\" t = [] if (status & 0x0000000F) == 6:", "filename != None: self.make_directory(filename) if not self.save_raw: if filename != None: self.write(\"readout,0,\"+remote(filename)) else:", "for transcript if verbose logging is enabled.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector.log\"", "applied.\"\"\" return self.bkg_image_size() == self.image_size() # By default verbose logging is enabled. Change", "\"\"\"Puts the detector into integration mode by stopping the continuous clearing. In case", "image.\" self.write(\"readout,3\") self.last_read = time() def save_image(self,filename): \"\"\"Saves the last read image to", "compared to the pixel size) get_bin - reply is two integer numbers, e.g.", "pixels. This value is important to know if the bin factor is changed.", "\"\"\"For non-critical messages. Append the message to the transcript, if verbose logging is", "MS340HS headersize = 4096 image_nbytes = 2*image_size**2 filesize = headersize+image_nbytes return filesize def", "t = timestamp() stderr.write(\"%s: %s: %s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message)) self.log(message)", "fields bits 0-3: state: 0=idle,8=busy bits 4-7: acquire bits 8-11: read bits 12-15:", "# Keep track of when the detector was last read. self.last_read = 0.0", "By default verbose logging is enabled. Change when problem solved. 
logging = False", "read_time = nan return read_time*safetyFactor def make_directory(self,filename): \"\"\"Make sure that the directory of", "0: t+= [\"integrate queued\"] if (status & 0x00000020) != 0: t+= [\"integrating\"] if", "return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value) while \"busy\" in self.state(): sleep(0.05) trigger_signal_type", "for the first (suppressed) image readout to complete. sleep(self.readout_time) self.trigger_signal_type = \"Opto\" def", "be specified, but uses auto-generated filenames instead. # As a work-araound generated a", "\"\"\"Estimated readout time in seconds. Changes with 'bin_factor'.\"\"\" safetyFactor = 1 from numpy", "9 of the state code tell whether the task status of \"read\" #", "control server program \"marccd_server_socket\" with TCP port number 2222. Usage example: ccd =", "to a file. The pathname of the file is interpreted in file system", "computer\"\"\" # The detector will ignore an \"acquire_images_triggered\" command if not # in", "which is substracted from every image after readout before the correction is applied.\"\"\"", "reply (The 1x1 bin mode with 4096x4096 pixels is not used, because the", "t+= [\"correcting\"] if (status & 0x00004000) != 0: t+= [\"correct error\"] if (status", "was last read. self.last_read = 0.0 # Verbose logging: record every command and", "from time import sleep,time from thread import allocate_lock class Rayonix_Detector(object): \"\"\"This is to", "last read. self.last_read = 0.0 # Verbose logging: record every command and reply", "file system of the Rayonix computer filename_suffix: including the dot (.) number_field_width: number", "Use 512x512-pixel bin mode, no reply set_bin,2,2 - Use full readout mode (2048x2048", "if (status & 0x0000000F) == 7: t+= [\"error\"] if (status & 0x0000000F) ==", "interpreted in file system of the server, not locally. 
\"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read", "if (status & 0x00040000) != 0: t+= [\"write error\"] if (status & 0x00100000)", "else: self.write(\"readout,0\") else: if filename != None: self.write(\"readout,3,\"+remote(filename)) else: self.write(\"readout,3\") ##while not self.is_reading():", "self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",1\") def save_raw_image(self,filename): \"\"\"Saves the last read image without spatial and uniformity", "not # in \"idle\" state. if not self.state() == \"idle\": self.abort() while self.state()", "not self.is_idle(): self.abort() while not self.is_idle(): sleep(0.05) self.write(\"readout,1\") # read the CCD and", "nan # Readout rate in frames per second as function of bin factor:", "def common_topdir(filenames): \"\"\"filenames: list of strings\"\"\" from os.path import dirname if len(filenames) ==", "& 0x00004000) != 0: t+= [\"correct error\"] if (status & 0x00010000) != 0:", "0: t+= [\"integrate error\"] if (status & 0x00000100) != 0: t+= [\"read queued\"]", "If already in sequence aquistion mode, cancel it. if not self.state() == \"idle\":", "sleep(0.05) self.write(\"readout,1\") # read the CCD and stores the result as background while", "filenames: list of absolute pathnames. Directory part must be valid pathname on file", "pathname of the file is interpreted in file system of the server, not", "non-critical messages. 
Append the message to the transcript, if verbose logging is enabled.\"\"\"", "== \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) # The \"start_series_triggered\" command does", "(The 1x1 bin mode with 4096x4096 pixels is not used, because the point-spread", "the message to the transcript, if verbose logging is enabled.\"\"\" if not self.verbose_logging:", "reply in /tmp/rayonix_detector.log self.verbose_logging = True @property def connected(self): from tcp_client import connected", "def readout_and_save_raw(self,filename): \"\"\"Reads the detector and saves the uncorrected image as a file.", "for this to happen. \"\"\" ##t0 = time() # Wait for the readout", "'start_series_triggered' command that # point ot the real filenames. When the rayonix softawre", "\"rayonix_detector\" may be overridden in subclass from logging import debug,info,warn,error import socket from", "# to the beamline control computer, so directories created via NFS on the", "0x00000040) != 0: t+= [\"integrate error\"] if (status & 0x00000100) != 0: t+=", "== 0: # //server/share/directory/file parts = pathname.split(\"/\") if len(parts) >= 4: server =", "of pixels as the last read image the correction as saving to file", "from the local format to the format used on the MAR CCD compter.", "0.0 # Verbose logging: record every command and reply in /tmp/rayonix_detector.log self.verbose_logging =", "def connected(self): from tcp_client import connected return connected(self.ip_address) online = connected def write(self,command):", "\"\" for part in parts[4:]: path += part+\"/\" path = path.rstrip(\"/\") pathname =", "signal filename_base: Directory part must be valid pathname on file system of the", "subclass from logging import debug,info,warn,error import socket from time import sleep,time from thread", "acquisition, # falling edge initiates frame transfer/readout self.bulb_mode = 0 # Keep track", "CCD and stores the result as background while not self.is_idle(): sleep(0.05) 
self.last_read =", "there is more than one detector\"\"\" if name is not None: self.name =", "the server, not locally. \"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read = time() def readout_raw(self): \"Reads", "the directory to write the image to exists. from os.path import dirname directory", "i in range(0,len(filenames)): link = tempdir+\"/%06d.rx\" % (i+1) if islink(link) or exists(link): remove(link)", "range(0,len(filenames)): link = tempdir+\"/%06d.rx\" % (i+1) if islink(link) or exists(link): remove(link) try: pathname", "== 0 or message[-1] != \"\\n\": message += \"\\n\" t = timestamp() file(self.logfile,\"a\").write(\"%s:", "# is either \"queued\" or \"executing\" if (status & 0x00000300) != 0: self.last_read", "image to finish. while self.is_reading(): sleep(0.05) # Work-around for a bug where the", "filenames[i] try: symlink(pathname,link) except Exception,msg: error(\"Cannot create of %r to %r: %s\" %", "When wait=False, do no wait for this to happen. \"\"\" ##t0 = time()", "logging is enabled.\"\"\" if not self.verbose_logging: return if len(message) == 0 or message[-1]", "pathname.split(\"/\") if len(parts) >= 4: server = parts[2] ; share = parts[3] path", "create of %r to %r: %s\" % (pathname,link,msg)) if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6)", "a bug where the detector remaingns in \"reading\" state # forever. <NAME> 27", "reply is integer number containing 6 4-bit fields bits 0-3: state: 0=idle,8=busy bits", "0: t+= [\"dezinger queued\"] if (status & 0x00200000) != 0: t+= [\"dezingering\"] if", "detectror enters \"integrating\" state, (maybe for the clearing to stop?) When wait=False, do", "1.0 # This is to make the query method multi-thread safe. 
self.lock =", "with the second parameter (\"Server command\" or \"Device Database Server\") set to \"/home/marccdsource/servers/marccd_server_socket\",", "stopping the continuous clearing. In case the CCD readout is in progess, execution", "int(self.query(\"get_size\").split(\",\")[0]) except: return 0 def filesize(self,bin_factor): \"\"\"Image file size in bytes including headers", "alternatively from the command line by the commaand \"hsserver_lagacy\". The server understand the", "or folder writable?\"\"\" from os import access,W_OK return access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames: list", "a network file server from the local format to the format used on", "no reply readout,0,filename - Reads out the detector, corrects the image and saves", "a comman that does not generate a reply\"\"\" from tcp_client import write write(self.ip_address,command)", "= 2 if self.bulb_mode else 1 self.write(\"start_series,%d,1,0,0,%d,0,%s,%s,%d\" % (n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width)) while self.state() != \"acquiring", "# After a bin factor change it takes about 2 s before the", "series of images timed by an external hardware trigger signal. filenames: list of", "If verbose logging is enabled, it is also added to the transcript.\"\"\" from", "readout before the correction is applied.\"\"\" return self.bkg_image_size() == self.image_size() # By default", "t+= [\"error\"] if (status & 0x0000000F) == 8: t+= [\"busy\"] if (status &", "triggered, 1= triggered frame transfer, 2 = bulb mode, 3 = LCLS mode", "if not wait: return while not self.is_integrating() and self.connected: sleep (0.05) def abort(self):", "[\"error\"] if (status & 0x0000000F) == 8: t+= [\"busy\"] if (status & 0x00000010)", "Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". 
pathname = pathname.replace(\"/\",\"\\\\\") pathname = win32wnet.WNetGetUniversalName(pathname) except: pass #", "while self.get_bin_factor() != n and time()-t < 3: sleep (0.1) bin_factor = property(get_bin_factor,set_bin_factor,", "[\"unavailable\"] if (status & 0x0000000F) == 7: t+= [\"error\"] if (status & 0x0000000F)", "than one detector\"\"\" if name is not None: self.name = name self.timeout =", "if (status & 0x00400000) != 0: t+= [\"dezinger error\"] if (status & 0x01000000)", "Pulldown Inerted','CMOS Pullup Inerted''Software'\"\"\" return self.query(\"get_trigger_signal_type\") def set_trigger_signal_type(self,value): self.write(\"set_trigger_signal_type,%s\" % value) while \"busy\"", "(status & 0x00000300) != 0: self.last_read = time() return status def is_idle (self):", "8-11: read bits 12-15: correct bits 16-19: write bits 20-23: dezinger Each filed", "valid backgournd image. self.auto_bkg = True # Whether to save corrected or raw", "is enabled.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector.log\" logfile = property(get_logfile) def timestamp():", "= Rayonix_Detector() if __name__ == \"__main__\": # for testing from pdb import pm", "0x00000300) != 0) def is_correcting (self): \"tells whether the chip is currently being", "idle,integating,reading,writing\"\"\" try: status = self.state_code() except: return \"\" # bit mask 0x00444440 masks", "\"con-ics-xpp-rayonix\" from os import makedirs,umask,chmod from os.path import exists from sys import stderr", "background image, no reply get_state - reply is integer number containing 6 4-bit", "computer, compared # to the beamline control computer, so directories created via NFS", "range(0,level): pathname = dirname(pathname) dirnames += [pathname] if all([n == dirnames[0] for n", "of strings\"\"\" from os.path import dirname if len(filenames) == 0: return [] if", "if (status & 0x00000020) != 0: t+= [\"integrating\"] if (status & 0x00000040) !=", "except Exception,details: 
stderr.write(\"makedirs: %r: %r\" % (pathname,details)) def iswritable(pathname): \"\"\"Is file or folder", "stderr.write(\"makedirs: %r: %r\" % (pathname,details)) def iswritable(pathname): \"\"\"Is file or folder writable?\"\"\" from", "by an external hardware trigger signal. filenames: list of absolute pathnames. Directory part", "True @property def connected(self): from tcp_client import connected return connected(self.ip_address) online = connected", "the task status of \"read\" # is either \"queued\" or \"executing\" return ((self.state_code()", "the the same number of pixels as the last read image the correction", "Windows drive letter to a UNC name. try: import win32wnet # Convert \"J:/anfinrud_0811/Data\"", "= name self.timeout = 1.0 # This is to make the query method", "remote control server program \"marccd_server_socket\" with TCP port number 2222. Usage example: ccd", "file. The pathname of the file is interpreted in file system of the", "have the the same number of pixels as the last read image the", "started from the MarCCD software from the Remote Control control panel with the", "return 0 def update_bkg(self): \"\"\"Updates the backgound image if needed, for instance after", "created via NFS on the # control machine might not be writable on", "series\"] if (status & 0x04000000) != 0: t+= [\"series error\"] state = \",\".join(t)", "def acquire_images_triggered(self,filenames): \"\"\"Acquire a series of images timed by an external hardware trigger", "fiber optic taper is large compared to the pixel size) get_bin - reply", "is \"executing\" if not self.connected: return True return ((self.state_code() & 0x00000020) != 0)", "writable?\"\"\" from os import access,W_OK return access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames: list of strings\"\"\"", "corrected or raw images. 
self.save_raw = False # For triggred image acquiation #", "state_code(self): \"\"\"Status information as integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\") if reply == \"\": return", "still requires 11 trigger pulses # to aquire 10 images. # Workaround: Software-trigger", "\"queued\" or \"executing\" if (status & 0x00000300) != 0: self.last_read = time() return", "import connected return connected(self.ip_address) online = connected def write(self,command): \"\"\"Sends a comman that", "enters \"integrating\" state, (maybe for the clearing to stop?) When wait=False, do no", "append it to the error log file. If verbose logging is enabled, it", "server program \"marccd_server_socket\" with TCP port number 2222. Usage example: ccd = rayonix_detector(\"marccd043.cars.aps.anl.gov:2222\")", "no wait for this to happen. \"\"\" ##t0 = time() # Wait for", "whether the task status of \"correct\" # is either \"queued\" or \"executing\" return", "== 0 and not self.ignore_first_trigger: # The detector software does not save to", "dirname(filenames[0]) for level in range(1,4): dirnames = [] for pathname in filenames: for", "__name__ == \"__main__\": # for testing from pdb import pm import logging logging.basicConfig(level=logging.DEBUG,format=\"%(asctime)s:", "self.log(\"query %r\" % command) from tcp_client import query return query(self.ip_address,command) def state_code(self): \"\"\"Status", "Puts the CCD to integration mode, no reply readout,0,filename - Reads out the", "dbput dbput(\"rayonix_detector_images.filenames\",repr(filenames)) def start_series_triggered(self,n_frames,filename_base, filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series of images timed by an", "images. # Workaround: Software-trigger the detector once after starting a series. 
self.trigger_signal_type =", "[\"reading\"] if (status & 0x00000400) != 0: t+= [\"read error\"] if (status &", "1: rising edge starts acquisition, # falling edge initiates frame transfer/readout self.bulb_mode =", "readout of the previous image to finish. while self.is_reading(): sleep(0.05) # Work-around for", "it. if not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05) #", "is no valid background image (after startup or binning changed). wait: The is", "save # an image the symblix link redirects is to create an image", "first (suppressed) image readout to complete. sleep(self.readout_time) self.trigger_signal_type = \"Opto\" def trigger(self): \"\"\"Software-trigger", "2018-06-101 \"\"\" __version__ = \"4.0.1\" # default name \"rayonix_detector\" may be overridden in", "or make sure that the directory is world-writable\"\"\" # This is a workaround", "logging: record every command and reply in /tmp/rayonix_detector.log self.verbose_logging = True @property def", "command does not allow a list of filenames # to be specified, but", "self.auto_bkg = True # Whether to save corrected or raw images. self.save_raw =", "the file is interpreted in file system of the server, not locally. \"\"\"", "= relpath(filenames[i],tempdir) except Exception,msg: error(\"Relative path of %r with respect to %r: %s\"", "while not self.is_idle(): sleep(0.05) self.last_read = time() def image_size(self): \"\"\"Width and height of", "message to the transcript, if verbose logging is enabled.\"\"\" if not self.verbose_logging: return", "symbilic link complying to the # naming scheme imposed by the 'start_series_triggered' command", "# control machine might not be writable on the Rayonix computer. # E.g.", "without spatial and uniformity correction to a file. 
The pathname of the file", "number of pixels as the last read image the correction as saving to", "0: t+= [\"series error\"] state = \",\".join(t) return state def start(self,wait=True): \"\"\"Puts the", "\"acquire_images_triggered\" command if not # in \"idle\" state. if not self.state() == \"idle\":", "file is written in background as a pipelined operation. The function returns immediately.", "if (status & 0x02000000) != 0: t+= [\"acquiring series\"] if (status & 0x04000000)", "+= end return pathname def makedirs(pathname): \"\"\"Create a directory, or make sure that", "make_directory(self,filename): \"\"\"Make sure that the directory of teh given filename exists by create", "\"\"\" Remote control of the MAR CCD detector, using <NAME>'s sample remote control", "% (reply,message)) return 0 # bit 8 and 9 of the state code", "return 0 # bit 8 and 9 of the state code tell whether", "0x00400000) != 0: t+= [\"dezinger error\"] if (status & 0x01000000) != 0: t+=", "self.connected: sleep (0.05) def abort(self): \"\"\"Cancel series acquiation mode\"\"\" self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads", "the detector out without correcting and displaying the image.\" self.write(\"readout,3\") self.last_read = time()", "for IP address, in case there is more than one detector\"\"\" if name", "The server is started from the MarCCD software from the Remote Control control", "t+= [\"dezinger queued\"] if (status & 0x00200000) != 0: t+= [\"dezingering\"] if (status", "import dirname directory = dirname(filename_base) makedirs(directory) filename_base = remote(filename_base) # If already in", "Readout rate in frames per second as function of bin factor: readout_rate =", "default name \"rayonix_detector\" may be overridden in subclass from logging import debug,info,warn,error import", "Make sure the directory to write the image to exists. 
from os.path import", "part must be valid pathname on file system of the Rayonix computer\"\"\" #", "\"\"\"Is file or folder writable?\"\"\" from os import access,W_OK return access(pathname,W_OK) def common_topdir(filenames):", "'bin_factor'.\"\"\" safetyFactor = 1 from numpy import nan # Readout rate in frames", "Directory part must be valid pathname on file system of the Rayonix computer\"\"\"", "= int(eval(reply)) except Exception,message: self.log_error(\"command 'get_state' generated bad reply %r: %s\" % (reply,message))", "the image is saved as a file. The image file is written in", "0) def state(self): \"\"\"Status information as string: idle,integating,reading,writing\"\"\" try: status = self.state_code() except:", "from persistent_property import persistent_property ip_address = persistent_property(\"ip_address\",\"mx340hs.cars.aps.anl.gov:2222\") ignore_first_trigger = persistent_property(\"ignore_first_trigger\",True) def __init__(self,name=None): \"\"\"name:", "nan return read_time*safetyFactor def make_directory(self,filename): \"\"\"Make sure that the directory of teh given", "with protocol v1 (timeout) \"\"\"Width and height of the current background image in", "0x00000020) != 0) def is_reading (self): \"tells whether the chip is currently being", "self.write(\"readout,3\") ##while not self.is_reading(): sleep(0.05) self.last_read = time() def readout_and_save_raw(self,filename): \"\"\"Reads the detector", "self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads the detector. If a filename is given, the image", "on file system of the Rayonix computer filename_suffix: including the dot (.) 
number_field_width:", "import access,W_OK return access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames: list of strings\"\"\" from os.path import", "overridden in subclass from logging import debug,info,warn,error import socket from time import sleep,time", "the symblix link redirects is to create an image with # the specified", "bad reply %r: %s\" % (reply,message)) return 0 # bit 8 and 9", "not locally. \"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read = time() def readout_raw(self): \"Reads the detector", "\"idle\" state. if not self.state() == \"idle\": self.abort() while self.state() != \"idle\": sleep(0.05)", "MAR CCD detector, using <NAME>'s sample remote control server program \"marccd_server_socket\" with TCP", "filename_base: Directory part must be valid pathname on file system of the Rayonix", "property(get_logfile) def timestamp(): \"\"\"Current date and time as formatted ASCCI text, precise to", "self.name = name self.timeout = 1.0 # This is to make the query", "9 of the state code tell whether the task status of \"correct\" #", "background image in pixels. This value is important to know if the bin", "mode, cancel it. if not self.state() == \"idle\": self.abort() while self.state() != \"idle\":", "\"\"\"File name error messages.\"\"\" from tempfile import gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile)", "def log(self,message): \"\"\"For non-critical messages. Append the message to the transcript, if verbose", "enabled. Change when problem solved. logging = False @property def readout_time(self): \"\"\"Estimated readout", "from os.path import dirname if len(filenames) == 0: return [] if len(filenames) ==", "log file. If verbose logging is enabled, it is also added to the", "the 'start_series_triggered' command that # point ot the real filenames. 
When the rayonix", "by the Rayonix software running # under a different user id on the", "by the commaand \"hsserver_lagacy\". The server understand the following commands: start - Puts", "wait for this to happen. \"\"\" ##t0 = time() # Wait for the", "in frames per second as function of bin factor: readout_rate = {1: 2,", "bin mode with 4096x4096 pixels is not used, because the point-spread function of", "(pathname,link,msg)) if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i])) self.start_series_triggered(len(filenames),tempdir+\"/\",\".rx\",6) # Save location of image files for", "aquistion mode, cancel it. if not self.state() == \"idle\": self.abort() while self.state() !=", "the Rayonix computer. # E.g. user id 10660(xppopr) on \"xpp-daq\", versus user id", "# bit 8 and 9 of the state code tell whether the task", "filename_suffix=\".rx\",number_field_width=6): \"\"\"Acquire a series of images timed by an exteranal hardware trigger signal", "status of \"read\" # is either \"queued\" or \"executing\" if (status & 0x00000300)", "CCD readout is in progess, execution is delayed until the last readout is", "imposed by the 'start_series_triggered' command that # point ot the real filenames. When", "the query method multi-thread safe. self.lock = allocate_lock() # If this flag is", "\"\"\"Status information as integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\") if reply == \"\": return 0", "= parts[2] ; share = parts[3] path = \"\" for part in parts[4:]:", "interpreted in file system of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"writefile,\"+remote(filename)+\",0\") def", "in background as a pipelined operation. The function returns immediately. The pathname of", "bkg_image_size(self): # does not work with protocol v1 (timeout) \"\"\"Width and height of", "def readout_time(self): \"\"\"Estimated readout time in seconds. 
Changes with 'bin_factor'.\"\"\" safetyFactor = 1", "comman that does not generate a reply\"\"\" from tcp_client import write write(self.ip_address,command) def", "UNIX style. pathname = pathname.replace(\"\\\\\",\"/\") if pathname.find(\"//\") == 0: # //server/share/directory/file parts =", "when using triggered frame transfer mode. However, (as of # Jul 2014, version", "the uncorrected image as a file. The image file is written in background", "# forever. <NAME> 27 Mar 2014 ##if time()-t0 > 2.0: self.abort() # Make", "name. try: import win32wnet # Convert \"J:/anfinrud_0811/Data\" to \"J:\\anfinrud_0811\\Data\". pathname = pathname.replace(\"/\",\"\\\\\") pathname", "[pathname] if all([n == dirnames[0] for n in dirnames]): break pathname = filenames[0]", "0x00000300) != 0: self.last_read = time() return status def is_idle (self): try: status", "stderr.write(\"%s: %s: %s\" % (t,self.ip_address,message)) file(self.error_logfile,\"a\").write(\"%s: %s\" % (t,message)) self.log(message) def log(self,message): \"\"\"For", "# Try to expand a Windows drive letter to a UNC name. try:", "uncorrected image as a file. The image file is written in background as", "read_time*safetyFactor def make_directory(self,filename): \"\"\"Make sure that the directory of teh given filename exists", "try: status = self.state_code() except: return True # bit mask 0x00444440 masks out", "dezinger Each filed contains a 4-bit code, with the following meaning: 0=idle, 1=queued,", "and height of the image in pixels at the current bin mode\"\"\" try:", "point ot the real filenames. 
When the rayonix softawre tries to save #", "tempfile import gettempdir return gettempdir()+\"/rayonix_detector_error.log\" error_logfile = property(get_error_logfile) def get_logfile(self): \"\"\"File name for", "masks out error flags if (status & ~0x0444444F) == 0: return True else:", "exteranal hardware trigger signal filename_base: Directory part must be valid pathname on file", "if (status & 0x00010000) != 0: t+= [\"write queued\"] if (status & 0x00020000)", "query return query(self.ip_address,command) def state_code(self): \"\"\"Status information as integer\"\"\" reply = self.query(\"get_state\").strip(\"\\n\\0\") if", "to integration mode, no reply readout,0,filename - Reads out the detector, corrects the", "uses auto-generated filenames instead. # As a work-araound generated a series of symbilic", "0: t+= [\"read error\"] if (status & 0x00001000) != 0: t+= [\"correct queued\"]", "Wait for the first (suppressed) image readout to complete. sleep(self.readout_time) self.trigger_signal_type = \"Opto\"", "startup or binning changed). wait: The is a 0.2 s delay until te", "__future__ import with_statement \"\"\" Remote control of the MAR CCD detector, using <NAME>'s", "of bin factor: readout_rate = {1: 2, 2: 10, 3: 15, 4: 25,", "[\"read queued\"] if (status & 0x00000200) != 0: t+= [\"reading\"] if (status &", "state: 0=idle,8=busy bits 4-7: acquire bits 8-11: read bits 12-15: correct bits 16-19:", "computer, so directories created via NFS on the # control machine might not", "0: return \"idle\" t = [] if (status & 0x0000000F) == 6: t+=", "class Rayonix_Detector(object): \"\"\"This is to remote control the MAR CCD detector Using remote", "except: pass # Convert separators from DOS style to UNIX style. 
pathname =", "\"//id14bxf/data\" in Windows maps to \"/net/id14bxf/data\" on Unix\"\"\" if not pathname: return pathname", "detector manual 0.3e Chapter 9: The Legacy Remote Mode for HS Detector Control", "if name is not None: self.name = name self.timeout = 1.0 # This", "abort(self): \"\"\"Cancel series acquiation mode\"\"\" self.write(\"abort\") def readout(self,filename=None): \"\"\"Reads the detector. If a", "= True # Whether to save corrected or raw images. self.save_raw = False", "t+= [\"series queued\"] if (status & 0x02000000) != 0: t+= [\"acquiring series\"] if", "path.rstrip(\"/\") pathname = \"/net/\"+server+\"/\"+share+\"/\"+path if not pathname.endswith(end): pathname += end return pathname def", "1: return dirname(filenames[0]) for level in range(1,4): dirnames = [] for pathname in", "The function returns immediately. The pathname of the file is interpreted in file", "self.state(): sleep(0.05) trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type) def get_bin_factor(self): try: return int(self.query(\"get_bin\").split(\",\")[0]) except: return def", "= rayonix_detector # for debugging filenames = [\"/tmp/test_%03d.mccd\" % (i+1) for i in", "Display the message and append it to the error log file. If verbose", "pixel size) get_bin - reply is two integer numbers, e.g. \"2,2\" get_size_bkg -", "from os import access,W_OK return access(pathname,W_OK) def common_topdir(filenames): \"\"\"filenames: list of strings\"\"\" from", "of the server, not locally. \"\"\" self.make_directory(filename) self.write(\"readout,3,\"+remote(filename)) self.last_read = time() def readout_raw(self):", "[\"series queued\"] if (status & 0x02000000) != 0: t+= [\"acquiring series\"] if (status", "return connected(self.ip_address) online = connected def write(self,command): \"\"\"Sends a comman that does not", "the error log file. If verbose logging is enabled, it is also added", "of when the detector was last read. 
self.last_read = 0.0 # Verbose logging:", "per second as function of bin factor: readout_rate = {1: 2, 2: 10,", "logging is enabled, it is also added to the transcript.\"\"\" from sys import", "& 0x00000010) != 0: t+= [\"integrate queued\"] if (status & 0x00000020) != 0:", "__init__(self,name=None): \"\"\"name: used for IP address, in case there is more than one", "0x0000000F) == 7: t+= [\"error\"] if (status & 0x0000000F) == 8: t+= [\"busy\"]", "20-23: dezinger Each filed contains a 4-bit code, with the following meaning: 0=idle,", "<gh_stars>0 from __future__ import with_statement \"\"\" Remote control of the MAR CCD detector,", "4: server = parts[2] ; share = parts[3] path = \"\" for part", "message[-1] != \"\\n\": message += \"\\n\" t = timestamp() stderr.write(\"%s: %s: %s\" %", "a different user id on the Rayonix control computer, compared # to the", "is None or filename == \"\": return from os.path import dirname directory =", "and not self.ignore_first_trigger: # The detector software does not save to first image,", "However, (as of # Jul 2014, version 0.3.10), the detector still requires 11", "\"read\" # is either \"queued\" or \"executing\" return ((self.state_code() & 0x00000300) != 0)", "and the third parameter (\"Server Arguments\" or \"Personal Name\") set to \"2222\". Or,", "# is either \"queued\" or \"executing\" return ((self.state_code() & 0x00003000) != 0) def", "sleep (0.1) bin_factor = property(get_bin_factor,set_bin_factor, doc=\"Readout X and Y bin factor\") def read_bkg(self):" ]
[ "1 class PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display = ['__unicode__', 'lan'] class PollParticipantAdmin(admin.ModelAdmin):", "= [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display = ['__unicode__', 'lan'] class PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant readonly_fields", "'lan'] class PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant readonly_fields = ['poll', 'user', 'option'] admin.site.register(Poll, PollAdmin)", "utf-8 -*- from django.conf import settings from django.contrib import admin from django.utils.translation import", "_(u'poll translation') verbose_name_plural = _(u'poll translations') model = PollTranslation max_num = len(settings.LANGUAGES) extra", "max_num = len(settings.LANGUAGES) extra = 1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll option') verbose_name_plural", "verbose_name_plural = _(u'poll options') model = PollOption extra = 1 class PollAdmin(admin.ModelAdmin): inlines", "= _(u'poll options') model = PollOption extra = 1 class PollAdmin(admin.ModelAdmin): inlines =", "extra = 1 class PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display = ['__unicode__', 'lan']", "PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll translation') verbose_name_plural = _(u'poll translations') model = PollTranslation max_num", "django.utils.translation import ugettext_lazy as _ from .models import Poll, PollOption, PollParticipant, PollTranslation class", "verbose_name_plural = _(u'poll translations') model = PollTranslation max_num = len(settings.LANGUAGES) extra = 1", "translations') model = PollTranslation max_num = len(settings.LANGUAGES) extra = 1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name", "from django.utils.translation import ugettext_lazy as _ from .models import Poll, PollOption, PollParticipant, PollTranslation", 
"PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll translation') verbose_name_plural = _(u'poll translations') model", "PollOption, PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll translation') verbose_name_plural = _(u'poll translations')", "as _ from .models import Poll, PollOption, PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name =", "translation') verbose_name_plural = _(u'poll translations') model = PollTranslation max_num = len(settings.LANGUAGES) extra =", "_(u'poll option') verbose_name_plural = _(u'poll options') model = PollOption extra = 1 class", "verbose_name = _(u'poll translation') verbose_name_plural = _(u'poll translations') model = PollTranslation max_num =", "from django.contrib import admin from django.utils.translation import ugettext_lazy as _ from .models import", "class PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll option') verbose_name_plural = _(u'poll options') model = PollOption", "['__unicode__', 'lan'] class PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant readonly_fields = ['poll', 'user', 'option'] admin.site.register(Poll,", "= 1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll option') verbose_name_plural = _(u'poll options') model", "import settings from django.contrib import admin from django.utils.translation import ugettext_lazy as _ from", "= 1 class PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display = ['__unicode__', 'lan'] class", "verbose_name = _(u'poll option') verbose_name_plural = _(u'poll options') model = PollOption extra =", "coding: utf-8 -*- from django.conf import settings from django.contrib import admin from django.utils.translation", "extra = 1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll 
option') verbose_name_plural = _(u'poll options')", "from .models import Poll, PollOption, PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll translation')", "django.contrib import admin from django.utils.translation import ugettext_lazy as _ from .models import Poll,", "option') verbose_name_plural = _(u'poll options') model = PollOption extra = 1 class PollAdmin(admin.ModelAdmin):", "= PollOption extra = 1 class PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display =", "import Poll, PollOption, PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll translation') verbose_name_plural =", "_(u'poll options') model = PollOption extra = 1 class PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin,", "= _(u'poll option') verbose_name_plural = _(u'poll options') model = PollOption extra = 1", "list_display = ['__unicode__', 'lan'] class PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant readonly_fields = ['poll', 'user',", "= PollTranslation max_num = len(settings.LANGUAGES) extra = 1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll", "Poll, PollOption, PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll translation') verbose_name_plural = _(u'poll", "[PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display = ['__unicode__', 'lan'] class PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant readonly_fields =", "from django.conf import settings from django.contrib import admin from django.utils.translation import ugettext_lazy as", "django.conf import settings from django.contrib import admin from django.utils.translation import ugettext_lazy as _", "_(u'poll translations') model = PollTranslation max_num = len(settings.LANGUAGES) extra = 1 class 
PollOptionInlineAdmin(admin.StackedInline):", "PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant readonly_fields = ['poll', 'user', 'option'] admin.site.register(Poll, PollAdmin) admin.site.register(PollParticipant, PollParticipantAdmin)", "admin from django.utils.translation import ugettext_lazy as _ from .models import Poll, PollOption, PollParticipant,", "ugettext_lazy as _ from .models import Poll, PollOption, PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name", "-*- from django.conf import settings from django.contrib import admin from django.utils.translation import ugettext_lazy", "PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll translation') verbose_name_plural = _(u'poll translations') model =", "len(settings.LANGUAGES) extra = 1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll option') verbose_name_plural = _(u'poll", "options') model = PollOption extra = 1 class PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin]", "model = PollOption extra = 1 class PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display", "PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll option') verbose_name_plural = _(u'poll options') model = PollOption extra", "= _(u'poll translations') model = PollTranslation max_num = len(settings.LANGUAGES) extra = 1 class", "# -*- coding: utf-8 -*- from django.conf import settings from django.contrib import admin", "model = PollTranslation max_num = len(settings.LANGUAGES) extra = 1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name =", "PollOptionInlineAdmin] list_display = ['__unicode__', 'lan'] class PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant readonly_fields = ['poll',", "inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display = ['__unicode__', 'lan'] 
class PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant", "-*- coding: utf-8 -*- from django.conf import settings from django.contrib import admin from", "import admin from django.utils.translation import ugettext_lazy as _ from .models import Poll, PollOption,", ".models import Poll, PollOption, PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll translation') verbose_name_plural", "= _(u'poll translation') verbose_name_plural = _(u'poll translations') model = PollTranslation max_num = len(settings.LANGUAGES)", "PollOption extra = 1 class PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display = ['__unicode__',", "import ugettext_lazy as _ from .models import Poll, PollOption, PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline):", "_ from .models import Poll, PollOption, PollParticipant, PollTranslation class PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll", "class PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant readonly_fields = ['poll', 'user', 'option'] admin.site.register(Poll, PollAdmin) admin.site.register(PollParticipant,", "= len(settings.LANGUAGES) extra = 1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll option') verbose_name_plural =", "1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll option') verbose_name_plural = _(u'poll options') model =", "class PollTranslationInlineAdmin(admin.StackedInline): verbose_name = _(u'poll translation') verbose_name_plural = _(u'poll translations') model = PollTranslation", "PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display = ['__unicode__', 'lan'] class PollParticipantAdmin(admin.ModelAdmin): model =", "class PollAdmin(admin.ModelAdmin): inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin] list_display = 
['__unicode__', 'lan'] class PollParticipantAdmin(admin.ModelAdmin): model", "settings from django.contrib import admin from django.utils.translation import ugettext_lazy as _ from .models", "= ['__unicode__', 'lan'] class PollParticipantAdmin(admin.ModelAdmin): model = PollParticipant readonly_fields = ['poll', 'user', 'option']", "PollTranslation max_num = len(settings.LANGUAGES) extra = 1 class PollOptionInlineAdmin(admin.StackedInline): verbose_name = _(u'poll option')" ]
[]
[ "Django 2.1.1 on 2019-02-09 01:11 from django.db import migrations, models import django.db.models.deletion class", "import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('login', '0004_user_settings'), ]", "('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')), ('result', models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True,", "[ ('login', '0004_user_settings'), ] operations = [ migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')), ], options={ 'verbose_name': '操作记录',", "null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')), ], options={ 'verbose_name': '操作记录', 'verbose_name_plural':", "models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')), ], options={ 'verbose_name': '操作记录', 'verbose_name_plural': '操作记录', 'ordering': ['-time'],", "null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')), ], options={ 'verbose_name': '操作记录', 'verbose_name_plural': '操作记录', 'ordering': ['-time'], },", "= [ migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')),", "primary_key=True, serialize=False, verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')), 
('result', models.TextField(blank=True, null=True,", "Migration(migrations.Migration): initial = True dependencies = [ ('login', '0004_user_settings'), ] operations = [", "by Django 2.1.1 on 2019-02-09 01:11 from django.db import migrations, models import django.db.models.deletion", "operations = [ migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True,", "to='login.User', verbose_name='操作人')), ], options={ 'verbose_name': '操作记录', 'verbose_name_plural': '操作记录', 'ordering': ['-time'], }, ), ]", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [", "('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')), ], options={ 'verbose_name': '操作记录', 'verbose_name_plural': '操作记录', 'ordering':", "] operations = [ migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time',", "models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')), ('result', models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True,", "verbose_name='操作对象')), ('result', models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')), ], options={", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('login',", "('login', '0004_user_settings'), ] operations = [ migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", 
"verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')), ('result', models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,", "verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')), ], options={ 'verbose_name': '操作记录', 'verbose_name_plural': '操作记录',", "models.TextField(blank=True, null=True, verbose_name='操作对象')), ('result', models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')),", "01:11 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True", "verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')), ('result', models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator',", "Generated by Django 2.1.1 on 2019-02-09 01:11 from django.db import migrations, models import", "2.1.1 on 2019-02-09 01:11 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "serialize=False, verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')), ('result', models.TextField(blank=True, null=True, verbose_name='操作结果')),", "'0004_user_settings'), ] operations = [ migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time', 
models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True,", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies =", "django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('login', '0004_user_settings'), ] operations", "= [ ('login', '0004_user_settings'), ] operations = [ migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')),", "dependencies = [ ('login', '0004_user_settings'), ] operations = [ migrations.CreateModel( name='Log', fields=[ ('id',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')), ('result', models.TextField(blank=True,", "# Generated by Django 2.1.1 on 2019-02-09 01:11 from django.db import migrations, models", "initial = True dependencies = [ ('login', '0004_user_settings'), ] operations = [ migrations.CreateModel(", "2019-02-09 01:11 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =", "[ migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj',", "null=True, verbose_name='操作对象')), ('result', models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', 
verbose_name='操作人')), ],", "migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True,", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies", "('result', models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')), ], options={ 'verbose_name':", "('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')), ('result', models.TextField(blank=True, null=True, verbose_name='操作结果')), ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User',", "class Migration(migrations.Migration): initial = True dependencies = [ ('login', '0004_user_settings'), ] operations =", "on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')), ], options={ 'verbose_name': '操作记录', 'verbose_name_plural': '操作记录', 'ordering': ['-time'], }, ),", "on 2019-02-09 01:11 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial", "models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('login', '0004_user_settings'),", "True dependencies = [ ('login', '0004_user_settings'), ] operations = [ migrations.CreateModel( name='Log', fields=[", "= True dependencies = [ ('login', '0004_user_settings'), ] operations = [ migrations.CreateModel( name='Log',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')), ('obj', models.TextField(blank=True, null=True, 
verbose_name='操作对象')), ('result'," ]
[ "if args[0] == 'stop': udon.run.stop(pidfile) elif args[0] == 'kill': udon.run.kill(pidfile) else: udon.log.init(foreground =", "import udon.log import udon.run pidfile = None opts, args = getopt.getopt(sys.argv[1:], \"p:\") for", "False, level = \"DEBUG\") udon.run.daemon(pidfile) logging.info(\"starting\") for i in range(20): logging.info(\"%d...\", i) time.sleep(1)", "import getopt import logging import sys import time import udon.log import udon.run pidfile", "udon.log import udon.run pidfile = None opts, args = getopt.getopt(sys.argv[1:], \"p:\") for opt,", "= arg if args: if args[0] == 'stop': udon.run.stop(pidfile) elif args[0] == 'kill':", "pidfile = arg if args: if args[0] == 'stop': udon.run.stop(pidfile) elif args[0] ==", "== 'stop': udon.run.stop(pidfile) elif args[0] == 'kill': udon.run.kill(pidfile) else: udon.log.init(foreground = False, level", "import udon.run pidfile = None opts, args = getopt.getopt(sys.argv[1:], \"p:\") for opt, arg", "opt, arg in opts: if opt == '-p': pidfile = arg if args:", "getopt import logging import sys import time import udon.log import udon.run pidfile =", "level = \"DEBUG\") udon.run.daemon(pidfile) logging.info(\"starting\") for i in range(20): logging.info(\"%d...\", i) time.sleep(1) logging.info(\"done\")", "udon.run pidfile = None opts, args = getopt.getopt(sys.argv[1:], \"p:\") for opt, arg in", "import time import udon.log import udon.run pidfile = None opts, args = getopt.getopt(sys.argv[1:],", "time import udon.log import udon.run pidfile = None opts, args = getopt.getopt(sys.argv[1:], \"p:\")", "arg if args: if args[0] == 'stop': udon.run.stop(pidfile) elif args[0] == 'kill': udon.run.kill(pidfile)", "pidfile = None opts, args = getopt.getopt(sys.argv[1:], \"p:\") for opt, arg in opts:", "opt == '-p': pidfile = arg if args: if args[0] == 'stop': udon.run.stop(pidfile)", "== 'kill': udon.run.kill(pidfile) else: udon.log.init(foreground = False, level = \"DEBUG\") udon.run.daemon(pidfile) 
logging.info(\"starting\") for", "for opt, arg in opts: if opt == '-p': pidfile = arg if", "= False, level = \"DEBUG\") udon.run.daemon(pidfile) logging.info(\"starting\") for i in range(20): logging.info(\"%d...\", i)", "sys import time import udon.log import udon.run pidfile = None opts, args =", "= getopt.getopt(sys.argv[1:], \"p:\") for opt, arg in opts: if opt == '-p': pidfile", "args = getopt.getopt(sys.argv[1:], \"p:\") for opt, arg in opts: if opt == '-p':", "import sys import time import udon.log import udon.run pidfile = None opts, args", "in opts: if opt == '-p': pidfile = arg if args: if args[0]", "args[0] == 'kill': udon.run.kill(pidfile) else: udon.log.init(foreground = False, level = \"DEBUG\") udon.run.daemon(pidfile) logging.info(\"starting\")", "getopt.getopt(sys.argv[1:], \"p:\") for opt, arg in opts: if opt == '-p': pidfile =", "opts, args = getopt.getopt(sys.argv[1:], \"p:\") for opt, arg in opts: if opt ==", "\"p:\") for opt, arg in opts: if opt == '-p': pidfile = arg", "args: if args[0] == 'stop': udon.run.stop(pidfile) elif args[0] == 'kill': udon.run.kill(pidfile) else: udon.log.init(foreground", "opts: if opt == '-p': pidfile = arg if args: if args[0] ==", "else: udon.log.init(foreground = False, level = \"DEBUG\") udon.run.daemon(pidfile) logging.info(\"starting\") for i in range(20):", "None opts, args = getopt.getopt(sys.argv[1:], \"p:\") for opt, arg in opts: if opt", "logging import sys import time import udon.log import udon.run pidfile = None opts,", "elif args[0] == 'kill': udon.run.kill(pidfile) else: udon.log.init(foreground = False, level = \"DEBUG\") udon.run.daemon(pidfile)", "args[0] == 'stop': udon.run.stop(pidfile) elif args[0] == 'kill': udon.run.kill(pidfile) else: udon.log.init(foreground = False,", "<gh_stars>1-10 import getopt import logging import sys import time import udon.log import udon.run", "udon.run.kill(pidfile) else: udon.log.init(foreground = False, level = \"DEBUG\") udon.run.daemon(pidfile) 
logging.info(\"starting\") for i in", "if args: if args[0] == 'stop': udon.run.stop(pidfile) elif args[0] == 'kill': udon.run.kill(pidfile) else:", "if opt == '-p': pidfile = arg if args: if args[0] == 'stop':", "arg in opts: if opt == '-p': pidfile = arg if args: if", "udon.run.stop(pidfile) elif args[0] == 'kill': udon.run.kill(pidfile) else: udon.log.init(foreground = False, level = \"DEBUG\")", "import logging import sys import time import udon.log import udon.run pidfile = None", "'-p': pidfile = arg if args: if args[0] == 'stop': udon.run.stop(pidfile) elif args[0]", "'stop': udon.run.stop(pidfile) elif args[0] == 'kill': udon.run.kill(pidfile) else: udon.log.init(foreground = False, level =", "'kill': udon.run.kill(pidfile) else: udon.log.init(foreground = False, level = \"DEBUG\") udon.run.daemon(pidfile) logging.info(\"starting\") for i", "udon.log.init(foreground = False, level = \"DEBUG\") udon.run.daemon(pidfile) logging.info(\"starting\") for i in range(20): logging.info(\"%d...\",", "= None opts, args = getopt.getopt(sys.argv[1:], \"p:\") for opt, arg in opts: if", "== '-p': pidfile = arg if args: if args[0] == 'stop': udon.run.stop(pidfile) elif" ]
[ "= ''.join(all_) all_ = all_.split('\\n') all_ = reversed(all_) for line in all_: for", "def bitize(num): return f\"Число в двоичной системе счисления: {str(bin(num))[2:]}\" def print_all(numData, num): print('Число", "системе счисления\": {romanize(num)[35:]}, \"Число в двоичной системе счисления\": {str(bin(num))[2:]}. ''' % (num, MC.smooth())", "isS = True elif 'составное' in numData[1]: isS = False if ',' in", "''' % (num, MC.smooth()) return datapiece def check_savings(file, patterns_list): with open(file, 'r') as", "hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n // 1000] h =", "patterns_list): with open(file, 'r') as f: try: all_ = f.read() all_ = ''.join(all_)", "rct, unus, isEx global isS, suf if 'простое' in numData[1]: isS = True", "False if '<' in numData: suf = True else: suf = False datapiece", "{str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в римской системе счисления\": {romanize(num)[35:]}, \"Число", "in map(int, line)] return line assert False except: return [] if __name__ ==", "line = line.split(\", \") line = [i for i in map(int, line)] return", "numData[1]: isS = False if ',' in numData: rct = True elif ','", "isEx = suf = False def formData(numData, MC, num): global rct, unus, isEx", "suf if 'простое' in numData[1]: isS = True elif 'составное' in numData[1]: isS", "from itertools import dropwhile def roman(n): if n > 0 and n <", "def check_savings(file, patterns_list): with open(file, 'r') as f: try: all_ = f.read() all_", "> 3999: return \"Ваше число нельзя представить в римской системе счисления\" else: return", "0 and n < 3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds =", "dropwhile def roman(n): if n > 0 and n < 3999: ones =", "import dropwhile def 
roman(n): if n > 0 and n < 3999: ones", "numData: suf = True else: suf = False datapiece = f''' \"Число %d\":", "def roman(n): if n > 0 and n < 3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"]", "suf = True else: suf = False datapiece = f''' \"Число %d\": \"{MC.dividers()}\",", "line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\", '') line = line.lstrip(' \\n').rstrip(' \\n') line = line.split(\",", "True elif 'составное' in numData[1]: isS = False if ',' in numData: rct", "\"{MC.repr_sqrt2()}\", \"Число в римской системе счисления\": {romanize(num)[35:]}, \"Число в двоичной системе счисления\": {str(bin(num))[2:]}.", "else: suf = False datapiece = f''' \"Число %d\": \"{MC.dividers()}\", \"Число простое\": {str(isS).lower()},", "itertools import dropwhile def roman(n): if n > 0 and n < 3999:", "i in map(int, line)] return line assert False except: return [] if __name__", "as f: try: all_ = f.read() all_ = ''.join(all_) all_ = all_.split('\\n') all_", "MC.smooth()) return datapiece def check_savings(file, patterns_list): with open(file, 'r') as f: try: all_", "return \"Ваше число нельзя представить в римской системе счисления\" else: return f\"Число в", "'<' in numData: suf = True else: suf = False datapiece = f'''", "{str(unus).lower()}, \"%s\", \"Число избыточное\": {str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в римской", "open(file, 'r') as f: try: all_ = f.read() all_ = ''.join(all_) all_ =", "True if '>' not in numData: isEx = False if '<' in numData:", "простое\": {str(isS).lower()}, \"Число является прямоугольным\": {str(rct).lower()}, \"Число - необычное\": {str(unus).lower()}, \"%s\", \"Число избыточное\":", "необычное\": {str(unus).lower()}, \"%s\", \"Число избыточное\": {str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в", "in numData: rct = False if '.' 
in numData: unus = True elif", "in numData: unus = True elif '.' not in numData: unus = False", "избыточное\": {str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в римской системе счисления\": {romanize(num)[35:]},", "in all_: for elem in patterns_list: if elem in line: continue else: break", "{ } for j in patterns_list[1:]: character_map.update({ord(j): ''}) line = line.translate(character_map) line =", "{str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в римской системе счисления\": {romanize(num)[35:]}, \"Число в двоичной системе", "= False if '.' in numData: unus = True elif '.' not in", "thounds[n // 1000] h = hunds[n // 100 % 10] te = tens[n", "[i for i in map(int, line)] return line assert False except: return []", "k, item in enumerate(numData): print(numData[k], end='\\n\\t') print('') isS = rct = unus =", "print_all(numData, num): print('Число {}:'.format(num), end='\\n\\t') for k, item in enumerate(numData): print(numData[k], end='\\n\\t') print('')", "= line.split(\", \") line = [i for i in map(int, line)] return line", "\" def romanize(num): if num < 0 or num > 3999: return \"Ваше", "False datapiece = f''' \"Число %d\": \"{MC.dividers()}\", \"Число простое\": {str(isS).lower()}, \"Число является прямоугольным\":", "= True elif ',' not in numData: rct = False if '.' 
in", "f\"Число в римской системе счисления: {roman(num)}\" def bitize(num): return f\"Число в двоичной системе", "in numData[1]: isS = False if ',' in numData: rct = True elif", "f: try: all_ = f.read() all_ = ''.join(all_) all_ = all_.split('\\n') all_ =", "num): global rct, unus, isEx global isS, suf if 'простое' in numData[1]: isS", "all_ = all_.split('\\n') all_ = reversed(all_) for line in all_: for elem in", "в римской системе счисления\" else: return f\"Число в римской системе счисления: {roman(num)}\" def", "datapiece def check_savings(file, patterns_list): with open(file, 'r') as f: try: all_ = f.read()", "\"Число простое\": {str(isS).lower()}, \"Число является прямоугольным\": {str(rct).lower()}, \"Число - необычное\": {str(unus).lower()}, \"%s\", \"Число", "te + o else: return \" - \" def romanize(num): if num <", "= True else: suf = False datapiece = f''' \"Число %d\": \"{MC.dividers()}\", \"Число", "character_map.update({ord(j): ''}) line = line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\", '') line = line.lstrip(' \\n').rstrip('", "= False if ',' in numData: rct = True elif ',' not in", "f\"Число в двоичной системе счисления: {str(bin(num))[2:]}\" def print_all(numData, num): print('Число {}:'.format(num), end='\\n\\t') for", "\"{MC.dividers()}\", \"Число простое\": {str(isS).lower()}, \"Число является прямоугольным\": {str(rct).lower()}, \"Число - необычное\": {str(unus).lower()}, \"%s\",", "numData: unus = True elif '.' not in numData: unus = False if", "line = [i for i in map(int, line)] return line assert False except:", "in numData: isEx = False if '<' in numData: suf = True else:", "if ',' in numData: rct = True elif ',' not in numData: rct", "\"Число в римской системе счисления\": {romanize(num)[35:]}, \"Число в двоичной системе счисления\": {str(bin(num))[2:]}. '''", "not in numData: rct = False if '.' 
in numData: unus = True", "- \" def romanize(num): if num < 0 or num > 3999: return", "rct = unus = isEx = suf = False def formData(numData, MC, num):", "suf = False def formData(numData, MC, num): global rct, unus, isEx global isS,", "= rct = unus = isEx = suf = False def formData(numData, MC,", "for elem in patterns_list: if elem in line: continue else: break else: character_map", "представить в римской системе счисления\" else: return f\"Число в римской системе счисления: {roman(num)}\"", "n > 0 and n < 3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"]", "if '>' in numData: isEx = True if '>' not in numData: isEx", "',' not in numData: rct = False if '.' in numData: unus =", "True elif '.' not in numData: unus = False if '>' in numData:", "formData(numData, MC, num): global rct, unus, isEx global isS, suf if 'простое' in", "',' in numData: rct = True elif ',' not in numData: rct =", "return f\"Число в двоичной системе счисления: {str(bin(num))[2:]}\" def print_all(numData, num): print('Число {}:'.format(num), end='\\n\\t')", "return f\"Число в римской системе счисления: {roman(num)}\" def bitize(num): return f\"Число в двоичной", "римской системе счисления\" else: return f\"Число в римской системе счисления: {roman(num)}\" def bitize(num):", "= False if '<' in numData: suf = True else: suf = False", "check_savings(file, patterns_list): with open(file, 'r') as f: try: all_ = f.read() all_ =", "tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n //", "\"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в римской системе счисления\": {romanize(num)[35:]}, \"Число в", "= False datapiece = f''' \"Число %d\": \"{MC.dividers()}\", \"Число 
простое\": {str(isS).lower()}, \"Число является", "h + te + o else: return \" - \" def romanize(num): if", "not in numData: unus = False if '>' in numData: isEx = True", "and n < 3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"]", "elem in line: continue else: break else: character_map = { } for j", "def formData(numData, MC, num): global rct, unus, isEx global isS, suf if 'простое'", "h = hunds[n // 100 % 10] te = tens[n // 10 %", "line in all_: for elem in patterns_list: if elem in line: continue else:", "line.replace(f\"{patterns_list[0]}\", '') line = line.lstrip(' \\n').rstrip(' \\n') line = line.split(\", \") line =", "if 'простое' in numData[1]: isS = True elif 'составное' in numData[1]: isS =", "[\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n", "\"Число %d\": \"{MC.dividers()}\", \"Число простое\": {str(isS).lower()}, \"Число является прямоугольным\": {str(rct).lower()}, \"Число - необычное\":", "10] return t + h + te + o else: return \" -", "число нельзя представить в римской системе счисления\" else: return f\"Число в римской системе", "or num > 3999: return \"Ваше число нельзя представить в римской системе счисления\"", "'простое' in numData[1]: isS = True elif 'составное' in numData[1]: isS = False", "line: continue else: break else: character_map = { } for j in patterns_list[1:]:", "if n > 0 and n < 3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens =", "[\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n // 1000] h = hunds[n // 100 % 10]", "datapiece = f''' \"Число %d\": 
\"{MC.dividers()}\", \"Число простое\": {str(isS).lower()}, \"Число является прямоугольным\": {str(rct).lower()},", "in numData[1]: isS = True elif 'составное' in numData[1]: isS = False if", "isS, suf if 'простое' in numData[1]: isS = True elif 'составное' in numData[1]:", "'>' in numData: isEx = True if '>' not in numData: isEx =", "= f.read() all_ = ''.join(all_) all_ = all_.split('\\n') all_ = reversed(all_) for line", "for k, item in enumerate(numData): print(numData[k], end='\\n\\t') print('') isS = rct = unus", "10] te = tens[n // 10 % 10] o = ones[n % 10]", "enumerate(numData): print(numData[k], end='\\n\\t') print('') isS = rct = unus = isEx = suf", "\"Число в двоичной системе счисления\": {str(bin(num))[2:]}. ''' % (num, MC.smooth()) return datapiece def", "\\n').rstrip(' \\n') line = line.split(\", \") line = [i for i in map(int,", "100 % 10] te = tens[n // 10 % 10] o = ones[n", "numData: rct = True elif ',' not in numData: rct = False if", "// 1000] h = hunds[n // 100 % 10] te = tens[n //", "num > 3999: return \"Ваше число нельзя представить в римской системе счисления\" else:", "in numData: rct = True elif ',' not in numData: rct = False", "= [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n // 1000] h = hunds[n", "line = line.replace(f\"{patterns_list[0]}\", '') line = line.lstrip(' \\n').rstrip(' \\n') line = line.split(\", \")", "unus = False if '>' in numData: isEx = True if '>' not", "numData: isEx = True if '>' not in numData: isEx = False if", "te = tens[n // 10 % 10] o = ones[n % 10] return", "{str(bin(num))[2:]}. 
''' % (num, MC.smooth()) return datapiece def check_savings(file, patterns_list): with open(file, 'r')", "= { } for j in patterns_list[1:]: character_map.update({ord(j): ''}) line = line.translate(character_map) line", "<reponame>theneon-Hacker/main_numProperty from itertools import dropwhile def roman(n): if n > 0 and n", "\"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в римской системе счисления\": {romanize(num)[35:]}, \"Число в двоичной системе счисления\":", "\"Ваше число нельзя представить в римской системе счисления\" else: return f\"Число в римской", "print(numData[k], end='\\n\\t') print('') isS = rct = unus = isEx = suf =", "\") line = [i for i in map(int, line)] return line assert False", "< 3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds =", "ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t", "% 10] te = tens[n // 10 % 10] o = ones[n %", "if '>' not in numData: isEx = False if '<' in numData: suf", "''.join(all_) all_ = all_.split('\\n') all_ = reversed(all_) for line in all_: for elem", "in enumerate(numData): print(numData[k], end='\\n\\t') print('') isS = rct = unus = isEx =", "системе счисления: {roman(num)}\" def bitize(num): return f\"Число в двоичной системе счисления: {str(bin(num))[2:]}\" def", "+ o else: return \" - \" def romanize(num): if num < 0", "% 10] return t + h + te + o else: return \"", "all_: for elem in patterns_list: if elem in line: continue else: break else:", "= line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\", '') line = line.lstrip(' \\n').rstrip(' \\n') line 
=", "= [i for i in map(int, line)] return line assert False except: return", "10 % 10] o = ones[n % 10] return t + h +", "line)] return line assert False except: return [] if __name__ == '__main__': pass", "with open(file, 'r') as f: try: all_ = f.read() all_ = ''.join(all_) all_", "try: all_ = f.read() all_ = ''.join(all_) all_ = all_.split('\\n') all_ = reversed(all_)", "return \" - \" def romanize(num): if num < 0 or num >", "t = thounds[n // 1000] h = hunds[n // 100 % 10] te", "} for j in patterns_list[1:]: character_map.update({ord(j): ''}) line = line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\",", "{}:'.format(num), end='\\n\\t') for k, item in enumerate(numData): print(numData[k], end='\\n\\t') print('') isS = rct", "True else: suf = False datapiece = f''' \"Число %d\": \"{MC.dividers()}\", \"Число простое\":", "break else: character_map = { } for j in patterns_list[1:]: character_map.update({ord(j): ''}) line", "for i in map(int, line)] return line assert False except: return [] if", "\"%s\", \"Число избыточное\": {str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в римской системе", "num < 0 or num > 3999: return \"Ваше число нельзя представить в", "счисления: {str(bin(num))[2:]}\" def print_all(numData, num): print('Число {}:'.format(num), end='\\n\\t') for k, item in enumerate(numData):", "0 or num > 3999: return \"Ваше число нельзя представить в римской системе", "else: return \" - \" def romanize(num): if num < 0 or num", "= True if '>' not in numData: isEx = False if '<' in", "patterns_list: if elem in line: continue else: break else: character_map = { }", "unus = True elif '.' not in numData: unus = False if '>'", "= True elif 'составное' in numData[1]: isS = False if ',' in numData:", "счисления\": {romanize(num)[35:]}, \"Число в двоичной системе счисления\": {str(bin(num))[2:]}. 
''' % (num, MC.smooth()) return", "roman(n): if n > 0 and n < 3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens", "reversed(all_) for line in all_: for elem in patterns_list: if elem in line:", "= [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n // 1000] h = hunds[n // 100 %", "{romanize(num)[35:]}, \"Число в двоичной системе счисления\": {str(bin(num))[2:]}. ''' % (num, MC.smooth()) return datapiece", "недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в римской системе счисления\": {romanize(num)[35:]}, \"Число в двоичной", "// 100 % 10] te = tens[n // 10 % 10] o =", "in numData: isEx = True if '>' not in numData: isEx = False", "римской системе счисления\": {romanize(num)[35:]}, \"Число в двоичной системе счисления\": {str(bin(num))[2:]}. ''' % (num,", "в двоичной системе счисления: {str(bin(num))[2:]}\" def print_all(numData, num): print('Число {}:'.format(num), end='\\n\\t') for k,", "{str(isS).lower()}, \"Число является прямоугольным\": {str(rct).lower()}, \"Число - необычное\": {str(unus).lower()}, \"%s\", \"Число избыточное\": {str(isEx).lower()},", "else: return f\"Число в римской системе счисления: {roman(num)}\" def bitize(num): return f\"Число в", "elif ',' not in numData: rct = False if '.' in numData: unus", "= tens[n // 10 % 10] o = ones[n % 10] return t", "in patterns_list: if elem in line: continue else: break else: character_map = {", "in patterns_list[1:]: character_map.update({ord(j): ''}) line = line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\", '') line =", "all_ = ''.join(all_) all_ = all_.split('\\n') all_ = reversed(all_) for line in all_:", "системе счисления\": {str(bin(num))[2:]}. 
''' % (num, MC.smooth()) return datapiece def check_savings(file, patterns_list): with", "item in enumerate(numData): print(numData[k], end='\\n\\t') print('') isS = rct = unus = isEx", "f.read() all_ = ''.join(all_) all_ = all_.split('\\n') all_ = reversed(all_) for line in", "line.lstrip(' \\n').rstrip(' \\n') line = line.split(\", \") line = [i for i in", "elif '.' not in numData: unus = False if '>' in numData: isEx", "else: character_map = { } for j in patterns_list[1:]: character_map.update({ord(j): ''}) line =", "прямоугольным\": {str(rct).lower()}, \"Число - необычное\": {str(unus).lower()}, \"%s\", \"Число избыточное\": {str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()},", "numData: isEx = False if '<' in numData: suf = True else: suf", "for line in all_: for elem in patterns_list: if elem in line: continue", "unus, isEx global isS, suf if 'простое' in numData[1]: isS = True elif", "3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"]", "all_ = reversed(all_) for line in all_: for elem in patterns_list: if elem", "else: break else: character_map = { } for j in patterns_list[1:]: character_map.update({ord(j): ''})", "print('Число {}:'.format(num), end='\\n\\t') for k, item in enumerate(numData): print(numData[k], end='\\n\\t') print('') isS =", "{roman(num)}\" def bitize(num): return f\"Число в двоичной системе счисления: {str(bin(num))[2:]}\" def print_all(numData, num):", "rct = False if '.' in numData: unus = True elif '.' 
not", "not in numData: isEx = False if '<' in numData: suf = True", "unus = isEx = suf = False def formData(numData, MC, num): global rct,", "return datapiece def check_savings(file, patterns_list): with open(file, 'r') as f: try: all_ =", "elem in patterns_list: if elem in line: continue else: break else: character_map =", "3999: return \"Ваше число нельзя представить в римской системе счисления\" else: return f\"Число", "\"Число - необычное\": {str(unus).lower()}, \"%s\", \"Число избыточное\": {str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\",", "% 10] o = ones[n % 10] return t + h + te", "\"Число избыточное\": {str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число в римской системе счисления\":", "(num, MC.smooth()) return datapiece def check_savings(file, patterns_list): with open(file, 'r') as f: try:", "map(int, line)] return line assert False except: return [] if __name__ == '__main__':", "10] o = ones[n % 10] return t + h + te +", "f''' \"Число %d\": \"{MC.dividers()}\", \"Число простое\": {str(isS).lower()}, \"Число является прямоугольным\": {str(rct).lower()}, \"Число -", "'>' not in numData: isEx = False if '<' in numData: suf =", "all_.split('\\n') all_ = reversed(all_) for line in all_: for elem in patterns_list: if", "if elem in line: continue else: break else: character_map = { } for", "end='\\n\\t') for k, item in enumerate(numData): print(numData[k], end='\\n\\t') print('') isS = rct =", "= isEx = suf = False def formData(numData, MC, num): global rct, unus,", "rct = True elif ',' not in numData: rct = False if '.'", "False if '.' in numData: unus = True elif '.' 
not in numData:", "системе счисления: {str(bin(num))[2:]}\" def print_all(numData, num): print('Число {}:'.format(num), end='\\n\\t') for k, item in", "for j in patterns_list[1:]: character_map.update({ord(j): ''}) line = line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\", '')", "двоичной системе счисления: {str(bin(num))[2:]}\" def print_all(numData, num): print('Число {}:'.format(num), end='\\n\\t') for k, item", "elif 'составное' in numData[1]: isS = False if ',' in numData: rct =", "= reversed(all_) for line in all_: for elem in patterns_list: if elem in", "'.' not in numData: unus = False if '>' in numData: isEx =", "in numData: unus = False if '>' in numData: isEx = True if", "j in patterns_list[1:]: character_map.update({ord(j): ''}) line = line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\", '') line", "является прямоугольным\": {str(rct).lower()}, \"Число - необычное\": {str(unus).lower()}, \"%s\", \"Число избыточное\": {str(isEx).lower()}, \"Число недостаточное\":", "character_map = { } for j in patterns_list[1:]: character_map.update({ord(j): ''}) line = line.translate(character_map)", "= line.lstrip(' \\n').rstrip(' \\n') line = line.split(\", \") line = [i for i", "[\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n // 1000] h = hunds[n //", "end='\\n\\t') print('') isS = rct = unus = isEx = suf = False", "o else: return \" - \" def romanize(num): if num < 0 or", "- необычное\": {str(unus).lower()}, \"%s\", \"Число избыточное\": {str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\", \"{MC.repr_sqrt2()}\", \"Число", "= [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n // 1000]", "False def formData(numData, MC, num): global rct, unus, 
isEx global isS, suf if", "+ te + o else: return \" - \" def romanize(num): if num", "> 0 and n < 3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds", "ones[n % 10] return t + h + te + o else: return", "= f''' \"Число %d\": \"{MC.dividers()}\", \"Число простое\": {str(isS).lower()}, \"Число является прямоугольным\": {str(rct).lower()}, \"Число", "= all_.split('\\n') all_ = reversed(all_) for line in all_: for elem in patterns_list:", "global isS, suf if 'простое' in numData[1]: isS = True elif 'составное' in", "line = line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\", '') line = line.lstrip(' \\n').rstrip(' \\n') line", "continue else: break else: character_map = { } for j in patterns_list[1:]: character_map.update({ord(j):", "False if '>' in numData: isEx = True if '>' not in numData:", "= False def formData(numData, MC, num): global rct, unus, isEx global isS, suf", "line.split(\", \") line = [i for i in map(int, line)] return line assert", "'') line = line.lstrip(' \\n').rstrip(' \\n') line = line.split(\", \") line = [i", "return t + h + te + o else: return \" - \"", "'r') as f: try: all_ = f.read() all_ = ''.join(all_) all_ = all_.split('\\n')", "< 0 or num > 3999: return \"Ваше число нельзя представить в римской", "if '.' in numData: unus = True elif '.' 
not in numData: unus", "= hunds[n // 100 % 10] te = tens[n // 10 % 10]", "\" - \" def romanize(num): if num < 0 or num > 3999:", "t + h + te + o else: return \" - \" def", "def print_all(numData, num): print('Число {}:'.format(num), end='\\n\\t') for k, item in enumerate(numData): print(numData[k], end='\\n\\t')", "= thounds[n // 1000] h = hunds[n // 100 % 10] te =", "tens[n // 10 % 10] o = ones[n % 10] return t +", "line = line.lstrip(' \\n').rstrip(' \\n') line = line.split(\", \") line = [i for", "1000] h = hunds[n // 100 % 10] te = tens[n // 10", "isEx = True if '>' not in numData: isEx = False if '<'", "MC, num): global rct, unus, isEx global isS, suf if 'простое' in numData[1]:", "num): print('Число {}:'.format(num), end='\\n\\t') for k, item in enumerate(numData): print(numData[k], end='\\n\\t') print('') isS", "[\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n // 1000] h", "isS = False if ',' in numData: rct = True elif ',' not", "if num < 0 or num > 3999: return \"Ваше число нельзя представить", "% (num, MC.smooth()) return datapiece def check_savings(file, patterns_list): with open(file, 'r') as f:", "= unus = isEx = suf = False def formData(numData, MC, num): global", "isS = rct = unus = isEx = suf = False def formData(numData,", "счисления\": {str(bin(num))[2:]}. ''' % (num, MC.smooth()) return datapiece def check_savings(file, patterns_list): with open(file,", "isEx global isS, suf if 'простое' in numData[1]: isS = True elif 'составное'", "'.' in numData: unus = True elif '.' 
not in numData: unus =", "= [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t =", "римской системе счисления: {roman(num)}\" def bitize(num): return f\"Число в двоичной системе счисления: {str(bin(num))[2:]}\"", "системе счисления\" else: return f\"Число в римской системе счисления: {roman(num)}\" def bitize(num): return", "'составное' in numData[1]: isS = False if ',' in numData: rct = True", "global rct, unus, isEx global isS, suf if 'простое' in numData[1]: isS =", "в римской системе счисления\": {romanize(num)[35:]}, \"Число в двоичной системе счисления\": {str(bin(num))[2:]}. ''' %", "= True elif '.' not in numData: unus = False if '>' in", "в римской системе счисления: {roman(num)}\" def bitize(num): return f\"Число в двоичной системе счисления:", "numData: unus = False if '>' in numData: isEx = True if '>'", "thounds = [\"\",\"M\",\"MM\",\"MMM\",\"MMMM\"] t = thounds[n // 1000] h = hunds[n // 100", "bitize(num): return f\"Число в двоичной системе счисления: {str(bin(num))[2:]}\" def print_all(numData, num): print('Число {}:'.format(num),", "= False if '>' in numData: isEx = True if '>' not in", "= ones[n % 10] return t + h + te + o else:", "счисления: {roman(num)}\" def bitize(num): return f\"Число в двоичной системе счисления: {str(bin(num))[2:]}\" def print_all(numData,", "в двоичной системе счисления\": {str(bin(num))[2:]}. 
''' % (num, MC.smooth()) return datapiece def check_savings(file,", "patterns_list[1:]: character_map.update({ord(j): ''}) line = line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\", '') line = line.lstrip('", "suf = False datapiece = f''' \"Число %d\": \"{MC.dividers()}\", \"Число простое\": {str(isS).lower()}, \"Число", "= suf = False def formData(numData, MC, num): global rct, unus, isEx global", "{str(bin(num))[2:]}\" def print_all(numData, num): print('Число {}:'.format(num), end='\\n\\t') for k, item in enumerate(numData): print(numData[k],", "if '<' in numData: suf = True else: suf = False datapiece =", "%d\": \"{MC.dividers()}\", \"Число простое\": {str(isS).lower()}, \"Число является прямоугольным\": {str(rct).lower()}, \"Число - необычное\": {str(unus).lower()},", "numData: rct = False if '.' in numData: unus = True elif '.'", "all_ = f.read() all_ = ''.join(all_) all_ = all_.split('\\n') all_ = reversed(all_) for", "o = ones[n % 10] return t + h + te + o", "\\n') line = line.split(\", \") line = [i for i in map(int, line)]", "нельзя представить в римской системе счисления\" else: return f\"Число в римской системе счисления:", "{str(rct).lower()}, \"Число - необычное\": {str(unus).lower()}, \"%s\", \"Число избыточное\": {str(isEx).lower()}, \"Число недостаточное\": {str(suf).lower()}, \"{MC.repr_pow2()}\",", "True elif ',' not in numData: rct = False if '.' 
in numData:", "print('') isS = rct = unus = isEx = suf = False def", "in line: continue else: break else: character_map = { } for j in", "+ h + te + o else: return \" - \" def romanize(num):", "''}) line = line.translate(character_map) line = line.replace(f\"{patterns_list[0]}\", '') line = line.lstrip(' \\n').rstrip(' \\n')", "romanize(num): if num < 0 or num > 3999: return \"Ваше число нельзя", "hunds[n // 100 % 10] te = tens[n // 10 % 10] o", "in numData: suf = True else: suf = False datapiece = f''' \"Число", "isEx = False if '<' in numData: suf = True else: suf =", "счисления\" else: return f\"Число в римской системе счисления: {roman(num)}\" def bitize(num): return f\"Число", "двоичной системе счисления\": {str(bin(num))[2:]}. ''' % (num, MC.smooth()) return datapiece def check_savings(file, patterns_list):", "= line.replace(f\"{patterns_list[0]}\", '') line = line.lstrip(' \\n').rstrip(' \\n') line = line.split(\", \") line", "// 10 % 10] o = ones[n % 10] return t + h", "numData[1]: isS = True elif 'составное' in numData[1]: isS = False if ','", "\"Число является прямоугольным\": {str(rct).lower()}, \"Число - необычное\": {str(unus).lower()}, \"%s\", \"Число избыточное\": {str(isEx).lower()}, \"Число", "def romanize(num): if num < 0 or num > 3999: return \"Ваше число", "n < 3999: ones = [\"\",\"I\",\"II\",\"III\",\"IV\",\"V\",\"VI\",\"VII\",\"VIII\",\"IX\"] tens = [\"\",\"X\",\"XX\",\"XXX\",\"XL\",\"L\",\"LX\",\"LXX\",\"LXXX\",\"XC\"] hunds = [\"\",\"C\",\"CC\",\"CCC\",\"CD\",\"D\",\"DC\",\"DCC\",\"DCCC\",\"CM\"] thounds", "False if ',' in numData: rct = True elif ',' not in numData:" ]
[ "\"r\") as fichier: JSON = fichier.read() config.load(JSON) #import des classes et objet for", "import SourceFileLoader from daemon.Configuration.Modele import * from daemon.Configuration.configuration import configuration ############################################################# # #", "de configuration des modules with open(JSONdirectory + \"module.json\", \"r\") as fichier: JSON =", "lobjet = {} #liste des classes lmodrules = [] #liste des modules de", "#liste des classes lmodrules = [] #liste des modules de regle config =", "des classes et objet for item in config.getlitem(): temp = getattr(SourceFileLoader(item,Moduledirectory +item+\".py\").load_module(), item)", "# CHARGEMENT DU FICHIER de configuration des modules with open(JSONdirectory + \"module.json\", \"r\")", "= fichier.read() config.load(JSON) #import des classes et objet for item in config.getlitem(): temp", "de regle config = configuration() # CHARGEMENT DU FICHIER de configuration des modules", "objet for item in config.getlitem(): temp = getattr(SourceFileLoader(item,Moduledirectory +item+\".py\").load_module(), item) lobjet[item] = temp()", "et objet for item in config.getlitem(): temp = getattr(SourceFileLoader(item,Moduledirectory +item+\".py\").load_module(), item) lobjet[item] =", "configuration des modules with open(JSONdirectory + \"module.json\", \"r\") as fichier: JSON = fichier.read()", "with open(JSONdirectory + \"module.json\", \"r\") as fichier: JSON = fichier.read() config.load(JSON) #import des", "configuration ############################################################# # # INITIALISATION ############################################################# lobjet = {} #liste des classes lmodrules", "des classes lmodrules = [] #liste des modules de regle config = configuration()", "config.load(JSON) #import des classes et objet for item in config.getlitem(): temp = getattr(SourceFileLoader(item,Moduledirectory", "regle config = configuration() # CHARGEMENT DU FICHIER de 
configuration des modules with", "\"module.json\", \"r\") as fichier: JSON = fichier.read() config.load(JSON) #import des classes et objet", "# INITIALISATION ############################################################# lobjet = {} #liste des classes lmodrules = [] #liste", "{} #liste des classes lmodrules = [] #liste des modules de regle config", "import * from daemon.Configuration.configuration import configuration ############################################################# # # INITIALISATION ############################################################# lobjet =", "#import des classes et objet for item in config.getlitem(): temp = getattr(SourceFileLoader(item,Moduledirectory +item+\".py\").load_module(),", "from importlib.machinery import SourceFileLoader from daemon.Configuration.Modele import * from daemon.Configuration.configuration import configuration #############################################################", "as fichier: JSON = fichier.read() config.load(JSON) #import des classes et objet for item", "des modules with open(JSONdirectory + \"module.json\", \"r\") as fichier: JSON = fichier.read() config.load(JSON)", "from daemon.Configuration.Modele import * from daemon.Configuration.configuration import configuration ############################################################# # # INITIALISATION #############################################################", "= configuration() # CHARGEMENT DU FICHIER de configuration des modules with open(JSONdirectory +", "importlib.machinery import SourceFileLoader from daemon.Configuration.Modele import * from daemon.Configuration.configuration import configuration ############################################################# #", "import configuration ############################################################# # # INITIALISATION ############################################################# lobjet = {} #liste des classes", "= {} #liste des classes lmodrules = [] #liste des modules de regle", 
"############################################################# lobjet = {} #liste des classes lmodrules = [] #liste des modules", "############################################################# # # INITIALISATION ############################################################# lobjet = {} #liste des classes lmodrules =", "configuration() # CHARGEMENT DU FICHIER de configuration des modules with open(JSONdirectory + \"module.json\",", "lmodrules = [] #liste des modules de regle config = configuration() # CHARGEMENT", "CHARGEMENT DU FICHIER de configuration des modules with open(JSONdirectory + \"module.json\", \"r\") as", "FICHIER de configuration des modules with open(JSONdirectory + \"module.json\", \"r\") as fichier: JSON", "modules with open(JSONdirectory + \"module.json\", \"r\") as fichier: JSON = fichier.read() config.load(JSON) #import", "classes et objet for item in config.getlitem(): temp = getattr(SourceFileLoader(item,Moduledirectory +item+\".py\").load_module(), item) lobjet[item]", "<reponame>0CT3T/Daemon_Home_Integration from importlib.machinery import SourceFileLoader from daemon.Configuration.Modele import * from daemon.Configuration.configuration import configuration", "from daemon.Configuration.configuration import configuration ############################################################# # # INITIALISATION ############################################################# lobjet = {} #liste", "* from daemon.Configuration.configuration import configuration ############################################################# # # INITIALISATION ############################################################# lobjet = {}", "[] #liste des modules de regle config = configuration() # CHARGEMENT DU FICHIER", "modules de regle config = configuration() # CHARGEMENT DU FICHIER de configuration des", "config = configuration() # CHARGEMENT DU FICHIER de configuration des modules with open(JSONdirectory", "= [] #liste des modules de regle config = configuration() # CHARGEMENT 
DU", "DU FICHIER de configuration des modules with open(JSONdirectory + \"module.json\", \"r\") as fichier:", "# # INITIALISATION ############################################################# lobjet = {} #liste des classes lmodrules = []", "SourceFileLoader from daemon.Configuration.Modele import * from daemon.Configuration.configuration import configuration ############################################################# # # INITIALISATION", "INITIALISATION ############################################################# lobjet = {} #liste des classes lmodrules = [] #liste des", "#liste des modules de regle config = configuration() # CHARGEMENT DU FICHIER de", "+ \"module.json\", \"r\") as fichier: JSON = fichier.read() config.load(JSON) #import des classes et", "fichier: JSON = fichier.read() config.load(JSON) #import des classes et objet for item in", "open(JSONdirectory + \"module.json\", \"r\") as fichier: JSON = fichier.read() config.load(JSON) #import des classes", "daemon.Configuration.Modele import * from daemon.Configuration.configuration import configuration ############################################################# # # INITIALISATION ############################################################# lobjet", "fichier.read() config.load(JSON) #import des classes et objet for item in config.getlitem(): temp =", "des modules de regle config = configuration() # CHARGEMENT DU FICHIER de configuration", "JSON = fichier.read() config.load(JSON) #import des classes et objet for item in config.getlitem():", "daemon.Configuration.configuration import configuration ############################################################# # # INITIALISATION ############################################################# lobjet = {} #liste des", "classes lmodrules = [] #liste des modules de regle config = configuration() #" ]
[ "print(stk) if stk[-1] == sym: stk.pop() else: print('INV', sym) err = True break", "in f.read().strip().split('\\n')] in_nums = [] total = 0 result = 0 other =", "l in f.read().strip().split('\\n')] in_nums = [] total = 0 result = 0 other", "']', '{': '}', '<': '>'} cls_to_opn = {v: k for k, v in", "v in opn_to_cls.items()} scores = { ')': 3, ']': 57, '}': 1197, '>':", "in cls_to_opn: print(stk) if stk[-1] == sym: stk.pop() else: print('INV', sym) err =", "stk.pop() else: print('INV', sym) err = True break if err: total += scores[sym]", "sym in syms: print(sym) if sym in list('([{<'): stk += [opn_to_cls[sym]] elif len(stk)", "False for sym in syms: print(sym) if sym in list('([{<'): stk += [opn_to_cls[sym]]", "for i in itr] with open(sys.argv[1], 'r') as f: file_lines = [l for", "']': 57, '}': 1197, '>': 25137 } while True: for l in file_lines:", "while True: for l in file_lines: stk = [] syms = list(l) err", "sym: stk.pop() else: print('INV', sym) err = True break if err: total +=", "opn_to_cls = {'(': ')', '[': ']', '{': '}', '<': '>'} cls_to_opn = {v:", "= [] syms = list(l) err = False for sym in syms: print(sym)", "0 result = 0 other = 0 opn_to_cls = {'(': ')', '[': ']',", "= list(l) err = False for sym in syms: print(sym) if sym in", "list(l) err = False for sym in syms: print(sym) if sym in list('([{<'):", "print('INV', sym) err = True break if err: total += scores[sym] break print(f'Total:", "= {v: k for k, v in opn_to_cls.items()} scores = { ')': 3,", "f.read().strip().split('\\n')] in_nums = [] total = 0 result = 0 other = 0", "as f: file_lines = [l for l in f.read().strip().split('\\n')] in_nums = [] total", "#!/usr/bin/env python3 import sys def ints(itr): return [int(i) for i in itr] with", "stk = [] syms = list(l) err = False for sym in syms:", "stk[-1] == sym: stk.pop() else: print('INV', sym) err = True break if err:", "cls_to_opn = {v: k for k, v in opn_to_cls.items()} scores = { ')':", "in syms: print(sym) if sym in list('([{<'): stk += 
[opn_to_cls[sym]] elif len(stk) and", "if sym in list('([{<'): stk += [opn_to_cls[sym]] elif len(stk) and sym in cls_to_opn:", "err = True break if err: total += scores[sym] break print(f'Total: {total}') print(f'Result:", "sym in cls_to_opn: print(stk) if stk[-1] == sym: stk.pop() else: print('INV', sym) err", "in_nums = [] total = 0 result = 0 other = 0 opn_to_cls", "0 opn_to_cls = {'(': ')', '[': ']', '{': '}', '<': '>'} cls_to_opn =", "for l in file_lines: stk = [] syms = list(l) err = False", "= 0 other = 0 opn_to_cls = {'(': ')', '[': ']', '{': '}',", "0 other = 0 opn_to_cls = {'(': ')', '[': ']', '{': '}', '<':", "other = 0 opn_to_cls = {'(': ')', '[': ']', '{': '}', '<': '>'}", "k for k, v in opn_to_cls.items()} scores = { ')': 3, ']': 57,", "')', '[': ']', '{': '}', '<': '>'} cls_to_opn = {v: k for k,", "sys def ints(itr): return [int(i) for i in itr] with open(sys.argv[1], 'r') as", "3, ']': 57, '}': 1197, '>': 25137 } while True: for l in", "itr] with open(sys.argv[1], 'r') as f: file_lines = [l for l in f.read().strip().split('\\n')]", "l in file_lines: stk = [] syms = list(l) err = False for", "for k, v in opn_to_cls.items()} scores = { ')': 3, ']': 57, '}':", "else: print('INV', sym) err = True break if err: total += scores[sym] break", "= [] total = 0 result = 0 other = 0 opn_to_cls =", "'>'} cls_to_opn = {v: k for k, v in opn_to_cls.items()} scores = {", "'[': ']', '{': '}', '<': '>'} cls_to_opn = {v: k for k, v", "'}', '<': '>'} cls_to_opn = {v: k for k, v in opn_to_cls.items()} scores", "{v: k for k, v in opn_to_cls.items()} scores = { ')': 3, ']':", "with open(sys.argv[1], 'r') as f: file_lines = [l for l in f.read().strip().split('\\n')] in_nums", "57, '}': 1197, '>': 25137 } while True: for l in file_lines: stk", "= True break if err: total += scores[sym] break print(f'Total: {total}') print(f'Result: {result}')", "import sys def ints(itr): return [int(i) for i in itr] with open(sys.argv[1], 'r')", "and sym in cls_to_opn: print(stk) if stk[-1] 
== sym: stk.pop() else: print('INV', sym)", "file_lines: stk = [] syms = list(l) err = False for sym in", "in file_lines: stk = [] syms = list(l) err = False for sym", "f: file_lines = [l for l in f.read().strip().split('\\n')] in_nums = [] total =", "25137 } while True: for l in file_lines: stk = [] syms =", "syms = list(l) err = False for sym in syms: print(sym) if sym", "'<': '>'} cls_to_opn = {v: k for k, v in opn_to_cls.items()} scores =", "k, v in opn_to_cls.items()} scores = { ')': 3, ']': 57, '}': 1197,", "file_lines = [l for l in f.read().strip().split('\\n')] in_nums = [] total = 0", "= { ')': 3, ']': 57, '}': 1197, '>': 25137 } while True:", "[opn_to_cls[sym]] elif len(stk) and sym in cls_to_opn: print(stk) if stk[-1] == sym: stk.pop()", "len(stk) and sym in cls_to_opn: print(stk) if stk[-1] == sym: stk.pop() else: print('INV',", "for l in f.read().strip().split('\\n')] in_nums = [] total = 0 result = 0", "'}': 1197, '>': 25137 } while True: for l in file_lines: stk =", "stk += [opn_to_cls[sym]] elif len(stk) and sym in cls_to_opn: print(stk) if stk[-1] ==", "cls_to_opn: print(stk) if stk[-1] == sym: stk.pop() else: print('INV', sym) err = True", "total = 0 result = 0 other = 0 opn_to_cls = {'(': ')',", "result = 0 other = 0 opn_to_cls = {'(': ')', '[': ']', '{':", "+= [opn_to_cls[sym]] elif len(stk) and sym in cls_to_opn: print(stk) if stk[-1] == sym:", "'r') as f: file_lines = [l for l in f.read().strip().split('\\n')] in_nums = []", "{'(': ')', '[': ']', '{': '}', '<': '>'} cls_to_opn = {v: k for", "elif len(stk) and sym in cls_to_opn: print(stk) if stk[-1] == sym: stk.pop() else:", "if stk[-1] == sym: stk.pop() else: print('INV', sym) err = True break if", "= 0 result = 0 other = 0 opn_to_cls = {'(': ')', '[':", "in itr] with open(sys.argv[1], 'r') as f: file_lines = [l for l in", "[int(i) for i in itr] with open(sys.argv[1], 'r') as f: file_lines = [l", "')': 3, ']': 57, '}': 1197, '>': 25137 } while True: for l", "True break if err: total += 
scores[sym] break print(f'Total: {total}') print(f'Result: {result}') print(f'Other:", "= [l for l in f.read().strip().split('\\n')] in_nums = [] total = 0 result", "[l for l in f.read().strip().split('\\n')] in_nums = [] total = 0 result =", "return [int(i) for i in itr] with open(sys.argv[1], 'r') as f: file_lines =", "ints(itr): return [int(i) for i in itr] with open(sys.argv[1], 'r') as f: file_lines", "open(sys.argv[1], 'r') as f: file_lines = [l for l in f.read().strip().split('\\n')] in_nums =", "True: for l in file_lines: stk = [] syms = list(l) err =", "list('([{<'): stk += [opn_to_cls[sym]] elif len(stk) and sym in cls_to_opn: print(stk) if stk[-1]", "[] total = 0 result = 0 other = 0 opn_to_cls = {'(':", "i in itr] with open(sys.argv[1], 'r') as f: file_lines = [l for l", "'{': '}', '<': '>'} cls_to_opn = {v: k for k, v in opn_to_cls.items()}", "1197, '>': 25137 } while True: for l in file_lines: stk = []", "break if err: total += scores[sym] break print(f'Total: {total}') print(f'Result: {result}') print(f'Other: {other}')", "scores = { ')': 3, ']': 57, '}': 1197, '>': 25137 } while", "sym) err = True break if err: total += scores[sym] break print(f'Total: {total}')", "in opn_to_cls.items()} scores = { ')': 3, ']': 57, '}': 1197, '>': 25137", "[] syms = list(l) err = False for sym in syms: print(sym) if", "for sym in syms: print(sym) if sym in list('([{<'): stk += [opn_to_cls[sym]] elif", "= 0 opn_to_cls = {'(': ')', '[': ']', '{': '}', '<': '>'} cls_to_opn", "opn_to_cls.items()} scores = { ')': 3, ']': 57, '}': 1197, '>': 25137 }", "= {'(': ')', '[': ']', '{': '}', '<': '>'} cls_to_opn = {v: k", "sym in list('([{<'): stk += [opn_to_cls[sym]] elif len(stk) and sym in cls_to_opn: print(stk)", "== sym: stk.pop() else: print('INV', sym) err = True break if err: total", "err = False for sym in syms: print(sym) if sym in list('([{<'): stk", "{ ')': 3, ']': 57, '}': 1197, '>': 25137 } while True: for", "print(sym) if sym in list('([{<'): stk += 
[opn_to_cls[sym]] elif len(stk) and sym in", "= False for sym in syms: print(sym) if sym in list('([{<'): stk +=", "in list('([{<'): stk += [opn_to_cls[sym]] elif len(stk) and sym in cls_to_opn: print(stk) if", "def ints(itr): return [int(i) for i in itr] with open(sys.argv[1], 'r') as f:", "syms: print(sym) if sym in list('([{<'): stk += [opn_to_cls[sym]] elif len(stk) and sym", "} while True: for l in file_lines: stk = [] syms = list(l)", "'>': 25137 } while True: for l in file_lines: stk = [] syms", "python3 import sys def ints(itr): return [int(i) for i in itr] with open(sys.argv[1]," ]
[ "<gh_stars>1-10 class OriginChannels_StandIN(): def __init__(self): pass def get_channels(self): return [] def get_channel_stream(self, chandict):", "OriginChannels_StandIN(): def __init__(self): pass def get_channels(self): return [] def get_channel_stream(self, chandict): return None", "class OriginChannels_StandIN(): def __init__(self): pass def get_channels(self): return [] def get_channel_stream(self, chandict): return" ]
[ "i, path in enumerate(paths): if (i / n_paths) >= i_tracker: print_perc(i_tracker) i_tracker +=", "figure out the frame rate for any # particular second. Save the image", "if __name__ == \"__main__\": destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times' crawler = FrameTimesCrawler(destination_dir) exp_root = '/run/media/alex/Alex4/lighttable-data'", "{len(time_dict)} files to {len(existing_paths)} existing files.\") npy_paths = list(filter(None, set(existing_paths) | set(npy_paths))) else:", "ext = file.rsplit('.', 1) key = (exp_code, step, period) if key in time_dict:", "for key in time_dict: (exp_code, step, period) = key times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found", "key in time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing times files\") self.logger.increase_global_indent() npy_paths", "= Path(npy_list_filepath) if npy_path.is_file(): # Add to existing records. Ignores duplicates. with npy_path.open()", "# The FrameTimesCrawler navigates through the backup data drives and # collects all", "the frame rate for any # particular second. Save the image times in", "0 for i, path in enumerate(paths): if (i / n_paths) >= i_tracker: print_perc(i_tracker)", "print_perc = lambda p: print(f\"{p:4.0%}\", end='\\r') i_tracker = 0 for i, path in", "Save the image times in text files def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger =", "key times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images for {exp_code} {step} {period}\") times_filename =", "{len(existing_paths)} existing files.\") npy_paths = list(filter(None, set(existing_paths) | set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files written\")", "any # particular second. Save the image times in text files def __init__(self,", "if npy_path.is_file(): # Add to existing records. 
Ignores duplicates. with npy_path.open() as fid:", "[] for key in time_dict: (exp_code, step, period) = key times = np.sort(np.array(time_dict[key]))", "n_paths) >= i_tracker: print_perc(i_tracker) i_tracker += 0.1 _, exp_code, step, period, file =", "# Add to existing records. Ignores duplicates. with npy_path.open() as fid: existing_paths =", "npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__ == \"__main__\": destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times' crawler", "to {len(existing_paths)} existing files.\") npy_paths = list(filter(None, set(existing_paths) | set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files", "Helpyr import data_loading from helpyr_misc import nsplit from helpyr_misc import ensure_dir_exists from logger", "Get the run parameters and frame times. Store in dict for now. self.logger.write(f\"Extracting", "else: time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing times files\") self.logger.increase_global_indent() npy_paths = [] for key", "= destination_dir ensure_dir_exists(destination_dir, self.logger) def end(self): Crawler.end(self) self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths", "parameters and frame times. Store in dict for now. self.logger.write(f\"Extracting run info\") time_dict", "npy_path = Path(npy_list_filepath) if npy_path.is_file(): # Add to existing records. Ignores duplicates. with", "FrameTimesCrawler (Crawler): # The FrameTimesCrawler navigates through the backup data drives and #", "helpyr_misc import ensure_dir_exists from logger import Logger from crawler import Crawler class FrameTimesCrawler", "def end(self): Crawler.end(self) self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths = self.file_list # Get", "and # collects all the image names. 
The names are timestamps for when", "where taken, therefore can be used to figure out the frame rate for", "helpyr_misc import nsplit from helpyr_misc import ensure_dir_exists from logger import Logger from crawler", "= lambda p: print(f\"{p:4.0%}\", end='\\r') i_tracker = 0 for i, path in enumerate(paths):", "(exp_code, step, period) if key in time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing", "frame times. Store in dict for now. self.logger.write(f\"Extracting run info\") time_dict = {}", "# Get the run parameters and frame times. Store in dict for now.", "self.logger.write(f\"{len(time_dict)} files written\") with npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__ == \"__main__\":", "existing records. Ignores duplicates. with npy_path.open() as fid: existing_paths = fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)}", "= list(filter(None, set(existing_paths) | set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files written\") with npy_path.open('w') as fid:", "{exp_code} {step} {period}\") times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath)", "step, period) if key in time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing times", "second. Save the image times in text files def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger", "run info\") time_dict = {} n_paths = len(paths) print_perc = lambda p: print(f\"{p:4.0%}\",", "exp_code, step, period, file = nsplit(path, 4) time_str, ext = file.rsplit('.', 1) key", "os.path.join(self.destination_dir, 'npy_list.txt') npy_path = Path(npy_list_filepath) if npy_path.is_file(): # Add to existing records. 
Ignores", "from logger import Logger from crawler import Crawler class FrameTimesCrawler (Crawler): # The", "duplicates. with npy_path.open() as fid: existing_paths = fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files to {len(existing_paths)}", "np from pathlib import Path # From Helpyr import data_loading from helpyr_misc import", "self.set_target_names('*.tif') self.destination_dir = destination_dir ensure_dir_exists(destination_dir, self.logger) def end(self): Crawler.end(self) self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False)", "end(self): Crawler.end(self) self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths = self.file_list # Get the", "Path # From Helpyr import data_loading from helpyr_misc import nsplit from helpyr_misc import", "the image times in text files def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath,", "time_str, ext = file.rsplit('.', 1) key = (exp_code, step, period) if key in", "as fid: existing_paths = fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files to {len(existing_paths)} existing files.\") npy_paths", "Crawler class FrameTimesCrawler (Crawler): # The FrameTimesCrawler navigates through the backup data drives", "fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__ == \"__main__\": destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times' crawler = FrameTimesCrawler(destination_dir)", "self.logger.write(f\"Adding {len(time_dict)} files to {len(existing_paths)} existing files.\") npy_paths = list(filter(None, set(existing_paths) | set(npy_paths)))", "The names are timestamps for when images # where taken, therefore can be", "= self.collect_frame_times self.set_target_names('*.tif') self.destination_dir = destination_dir ensure_dir_exists(destination_dir, 
self.logger) def end(self): Crawler.end(self) self.logger.end_output() def", "all the image names. The names are timestamps for when images # where", "be used to figure out the frame rate for any # particular second.", "the run parameters and frame times. Store in dict for now. self.logger.write(f\"Extracting run", "times files\") self.logger.increase_global_indent() npy_paths = [] for key in time_dict: (exp_code, step, period)", "os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt') npy_path = Path(npy_list_filepath)", "import Crawler class FrameTimesCrawler (Crawler): # The FrameTimesCrawler navigates through the backup data", "set(existing_paths) | set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files written\") with npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\")", "from crawler import Crawler class FrameTimesCrawler (Crawler): # The FrameTimesCrawler navigates through the", "= nsplit(path, 4) time_str, ext = file.rsplit('.', 1) key = (exp_code, step, period)", "numpy as np from pathlib import Path # From Helpyr import data_loading from", "names are timestamps for when images # where taken, therefore can be used", "fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__ == \"__main__\": destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times' crawler = FrameTimesCrawler(destination_dir) exp_root", "fid: existing_paths = fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files to {len(existing_paths)} existing files.\") npy_paths =", "Logger from crawler import Crawler class FrameTimesCrawler (Crawler): # The FrameTimesCrawler navigates through", "# From Helpyr import data_loading from helpyr_misc import nsplit from helpyr_misc import ensure_dir_exists", "for any # particular 
second. Save the image times in text files def", "particular second. Save the image times in text files def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"):", "self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths = self.file_list # Get the run parameters", "os import numpy as np from pathlib import Path # From Helpyr import", "times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images for {exp_code} {step} {period}\") times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\"", "to figure out the frame rate for any # particular second. Save the", "self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt') npy_path = Path(npy_list_filepath) if npy_path.is_file(): # Add to", "step, period) = key times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images for {exp_code} {step}", "= fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files to {len(existing_paths)} existing files.\") npy_paths = list(filter(None, set(existing_paths)", "default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif') self.destination_dir = destination_dir ensure_dir_exists(destination_dir, self.logger) def", "when images # where taken, therefore can be used to figure out the", "The FrameTimesCrawler navigates through the backup data drives and # collects all the", "for now. 
self.logger.write(f\"Extracting run info\") time_dict = {} n_paths = len(paths) print_perc =", "from helpyr_misc import ensure_dir_exists from logger import Logger from crawler import Crawler class", "print_perc(i_tracker) i_tracker += 0.1 _, exp_code, step, period, file = nsplit(path, 4) time_str,", "= Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif') self.destination_dir = destination_dir ensure_dir_exists(destination_dir,", "nsplit(path, 4) time_str, ext = file.rsplit('.', 1) key = (exp_code, step, period) if", "therefore can be used to figure out the frame rate for any #", "file.rsplit('.', 1) key = (exp_code, step, period) if key in time_dict: time_dict[key].append(np.float(time_str)) else:", "destination_dir ensure_dir_exists(destination_dir, self.logger) def end(self): Crawler.end(self) self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths =", "f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt')", "times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt') npy_path = Path(npy_list_filepath) if", "i_tracker += 0.1 _, exp_code, step, period, file = nsplit(path, 4) time_str, ext", "npy_path.open() as fid: existing_paths = fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files to {len(existing_paths)} existing files.\")", "path in enumerate(paths): if (i / n_paths) >= i_tracker: print_perc(i_tracker) i_tracker += 0.1", "step, period, file = nsplit(path, 4) time_str, ext = file.rsplit('.', 1) key =", "through 
the backup data drives and # collects all the image names. The", "files def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] =", "__init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif')", "period) if key in time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing times files\")", "images for {exp_code} {step} {period}\") times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir, times_filename) np.save(times_filepath,", "paths = self.file_list # Get the run parameters and frame times. Store in", "are timestamps for when images # where taken, therefore can be used to", "to existing records. Ignores duplicates. with npy_path.open() as fid: existing_paths = fid.read().splitlines() self.logger.write(f\"Adding", "'npy_list.txt') npy_path = Path(npy_list_filepath) if npy_path.is_file(): # Add to existing records. 
Ignores duplicates.", "npy_paths = list(filter(None, set(existing_paths) | set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files written\") with npy_path.open('w') as", "= [np.float(time_str)] self.logger.write(f\"Writing times files\") self.logger.increase_global_indent() npy_paths = [] for key in time_dict:", "fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files to {len(existing_paths)} existing files.\") npy_paths = list(filter(None, set(existing_paths) |", ">= i_tracker: print_perc(i_tracker) i_tracker += 0.1 _, exp_code, step, period, file = nsplit(path,", "{} n_paths = len(paths) print_perc = lambda p: print(f\"{p:4.0%}\", end='\\r') i_tracker = 0", "the image names. The names are timestamps for when images # where taken,", "from pathlib import Path # From Helpyr import data_loading from helpyr_misc import nsplit", "if key in time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing times files\") self.logger.increase_global_indent()", "image names. The names are timestamps for when images # where taken, therefore", "files\") self.logger.increase_global_indent() npy_paths = [] for key in time_dict: (exp_code, step, period) =", "self.logger.increase_global_indent() npy_paths = [] for key in time_dict: (exp_code, step, period) = key", "backup data drives and # collects all the image names. The names are", "can be used to figure out the frame rate for any # particular", "period, file = nsplit(path, 4) time_str, ext = file.rsplit('.', 1) key = (exp_code,", "= os.path.join(self.destination_dir, 'npy_list.txt') npy_path = Path(npy_list_filepath) if npy_path.is_file(): # Add to existing records.", "timestamps for when images # where taken, therefore can be used to figure", "if (i / n_paths) >= i_tracker: print_perc(i_tracker) i_tracker += 0.1 _, exp_code, step,", "in dict for now. 
self.logger.write(f\"Extracting run info\") time_dict = {} n_paths = len(paths)", "in enumerate(paths): if (i / n_paths) >= i_tracker: print_perc(i_tracker) i_tracker += 0.1 _,", "the backup data drives and # collects all the image names. The names", "for i, path in enumerate(paths): if (i / n_paths) >= i_tracker: print_perc(i_tracker) i_tracker", "self.file_list # Get the run parameters and frame times. Store in dict for", "time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing times files\") self.logger.increase_global_indent() npy_paths = []", "logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif') self.destination_dir = destination_dir ensure_dir_exists(destination_dir, self.logger) def end(self): Crawler.end(self)", "= key times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images for {exp_code} {step} {period}\") times_filename", "= 0 for i, path in enumerate(paths): if (i / n_paths) >= i_tracker:", "= os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt') npy_path =", "import Logger from crawler import Crawler class FrameTimesCrawler (Crawler): # The FrameTimesCrawler navigates", "= file.rsplit('.', 1) key = (exp_code, step, period) if key in time_dict: time_dict[key].append(np.float(time_str))", "in time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing times files\") self.logger.increase_global_indent() npy_paths =", "and frame times. Store in dict for now. 
self.logger.write(f\"Extracting run info\") time_dict =", "(i / n_paths) >= i_tracker: print_perc(i_tracker) i_tracker += 0.1 _, exp_code, step, period,", "[np.float(time_str)] self.logger.write(f\"Writing times files\") self.logger.increase_global_indent() npy_paths = [] for key in time_dict: (exp_code,", "navigates through the backup data drives and # collects all the image names.", "= len(paths) print_perc = lambda p: print(f\"{p:4.0%}\", end='\\r') i_tracker = 0 for i,", "end='\\r') i_tracker = 0 for i, path in enumerate(paths): if (i / n_paths)", "run parameters and frame times. Store in dict for now. self.logger.write(f\"Extracting run info\")", "i_tracker: print_perc(i_tracker) i_tracker += 0.1 _, exp_code, step, period, file = nsplit(path, 4)", "drives and # collects all the image names. The names are timestamps for", "with npy_path.open() as fid: existing_paths = fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files to {len(existing_paths)} existing", "logger = Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif') self.destination_dir = destination_dir", "Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif') self.destination_dir = destination_dir ensure_dir_exists(destination_dir, self.logger)", "4) time_str, ext = file.rsplit('.', 1) key = (exp_code, step, period) if key", "= (exp_code, step, period) if key in time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key] = [np.float(time_str)]", "pathlib import Path # From Helpyr import data_loading from helpyr_misc import nsplit from", "in text files def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger)", "frame rate for any # 
particular second. Save the image times in text", "= {} n_paths = len(paths) print_perc = lambda p: print(f\"{p:4.0%}\", end='\\r') i_tracker =", "written\") with npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__ == \"__main__\": destination_dir =", "with npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__ == \"__main__\": destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times'", "time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing times files\") self.logger.increase_global_indent() npy_paths = [] for key in", "data drives and # collects all the image names. The names are timestamps", "time_dict = {} n_paths = len(paths) print_perc = lambda p: print(f\"{p:4.0%}\", end='\\r') i_tracker", "nsplit from helpyr_misc import ensure_dir_exists from logger import Logger from crawler import Crawler", "self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif') self.destination_dir = destination_dir ensure_dir_exists(destination_dir, self.logger) def end(self): Crawler.end(self) self.logger.end_output()", "records. Ignores duplicates. 
with npy_path.open() as fid: existing_paths = fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files", "def collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths = self.file_list # Get the run parameters and", "(exp_code, step, period) = key times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images for {exp_code}", "ensure_dir_exists from logger import Logger from crawler import Crawler class FrameTimesCrawler (Crawler): #", "info\") time_dict = {} n_paths = len(paths) print_perc = lambda p: print(f\"{p:4.0%}\", end='\\r')", "in time_dict: (exp_code, step, period) = key times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images", "collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths = self.file_list # Get the run parameters and frame", "from helpyr_misc import nsplit from helpyr_misc import ensure_dir_exists from logger import Logger from", "out the frame rate for any # particular second. Save the image times", "as np from pathlib import Path # From Helpyr import data_loading from helpyr_misc", "= [] for key in time_dict: (exp_code, step, period) = key times =", "len(paths) print_perc = lambda p: print(f\"{p:4.0%}\", end='\\r') i_tracker = 0 for i, path", "for when images # where taken, therefore can be used to figure out", "self.logger) def end(self): Crawler.end(self) self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths = self.file_list #", "Crawler.end(self) self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths = self.file_list # Get the run", "npy_path.is_file(): # Add to existing records. Ignores duplicates. with npy_path.open() as fid: existing_paths", "FrameTimesCrawler navigates through the backup data drives and # collects all the image", "now. 
self.logger.write(f\"Extracting run info\") time_dict = {} n_paths = len(paths) print_perc = lambda", "text files def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times']", "# particular second. Save the image times in text files def __init__(self, destination_dir,", "1) key = (exp_code, step, period) if key in time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key]", "times. Store in dict for now. self.logger.write(f\"Extracting run info\") time_dict = {} n_paths", "{period}\") times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath", "files.\") npy_paths = list(filter(None, set(existing_paths) | set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files written\") with npy_path.open('w')", "self.collect_frame_times self.set_target_names('*.tif') self.destination_dir = destination_dir ensure_dir_exists(destination_dir, self.logger) def end(self): Crawler.end(self) self.logger.end_output() def collect_frame_times(self):", "i_tracker = 0 for i, path in enumerate(paths): if (i / n_paths) >=", "time_dict: (exp_code, step, period) = key times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images for", "import numpy as np from pathlib import Path # From Helpyr import data_loading", "_, exp_code, step, period, file = nsplit(path, 4) time_str, ext = file.rsplit('.', 1)", "period) = key times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images for {exp_code} {step} {period}\")", "np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images for {exp_code} {step} {period}\") times_filename = 
f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath =", "Path(npy_list_filepath) if npy_path.is_file(): # Add to existing records. Ignores duplicates. with npy_path.open() as", "data_loading from helpyr_misc import nsplit from helpyr_misc import ensure_dir_exists from logger import Logger", "Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif') self.destination_dir = destination_dir ensure_dir_exists(destination_dir, self.logger) def end(self):", "key in time_dict: (exp_code, step, period) = key times = np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)}", "lambda p: print(f\"{p:4.0%}\", end='\\r') i_tracker = 0 for i, path in enumerate(paths): if", "file = nsplit(path, 4) time_str, ext = file.rsplit('.', 1) key = (exp_code, step,", "self.logger.write(\"Done!\") if __name__ == \"__main__\": destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times' crawler = FrameTimesCrawler(destination_dir) exp_root =", "= self.file_list # Get the run parameters and frame times. 
Store in dict", "files to {len(existing_paths)} existing files.\") npy_paths = list(filter(None, set(existing_paths) | set(npy_paths))) else: self.logger.write(f\"{len(time_dict)}", "import Path # From Helpyr import data_loading from helpyr_misc import nsplit from helpyr_misc", "list(filter(None, set(existing_paths) | set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files written\") with npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths))", "def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times", "set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files written\") with npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__", "import nsplit from helpyr_misc import ensure_dir_exists from logger import Logger from crawler import", "print() paths = self.file_list # Get the run parameters and frame times. Store", "Store in dict for now. 
self.logger.write(f\"Extracting run info\") time_dict = {} n_paths =", "times in text files def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath, default_verbose=True) Crawler.__init__(self,", "__name__ == \"__main__\": destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times' crawler = FrameTimesCrawler(destination_dir) exp_root = '/run/media/alex/Alex4/lighttable-data' crawler.set_root(exp_root)", "files written\") with npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__ == \"__main__\": destination_dir", "import os import numpy as np from pathlib import Path # From Helpyr", "ensure_dir_exists(destination_dir, self.logger) def end(self): Crawler.end(self) self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False) print() paths = self.file_list", "n_paths = len(paths) print_perc = lambda p: print(f\"{p:4.0%}\", end='\\r') i_tracker = 0 for", "0.1 _, exp_code, step, period, file = nsplit(path, 4) time_str, ext = file.rsplit('.',", "for {exp_code} {step} {period}\") times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times)", "npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt') npy_path = Path(npy_list_filepath) if npy_path.is_file(): # Add", "np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt') npy_path = Path(npy_list_filepath) if npy_path.is_file():", "image times in text files def __init__(self, destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath, default_verbose=True)", "self.logger.write(f\"Extracting run info\") time_dict 
= {} n_paths = len(paths) print_perc = lambda p:", "times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath =", "{step} {period}\") times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent()", "Ignores duplicates. with npy_path.open() as fid: existing_paths = fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files to", "existing files.\") npy_paths = list(filter(None, set(existing_paths) | set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files written\") with", "== \"__main__\": destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times' crawler = FrameTimesCrawler(destination_dir) exp_root = '/run/media/alex/Alex4/lighttable-data' crawler.set_root(exp_root) crawler.run()", "rate for any # particular second. Save the image times in text files", "(Crawler): # The FrameTimesCrawler navigates through the backup data drives and # collects", "enumerate(paths): if (i / n_paths) >= i_tracker: print_perc(i_tracker) i_tracker += 0.1 _, exp_code,", "npy_paths = [] for key in time_dict: (exp_code, step, period) = key times", "= np.sort(np.array(time_dict[key])) self.logger.write(f\"Found {len(times)} images for {exp_code} {step} {period}\") times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath", "{len(times)} images for {exp_code} {step} {period}\") times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir, times_filename)", "Add to existing records. Ignores duplicates. 
with npy_path.open() as fid: existing_paths = fid.read().splitlines()", "print(f\"{p:4.0%}\", end='\\r') i_tracker = 0 for i, path in enumerate(paths): if (i /", "/ n_paths) >= i_tracker: print_perc(i_tracker) i_tracker += 0.1 _, exp_code, step, period, file", "used to figure out the frame rate for any # particular second. Save", "images # where taken, therefore can be used to figure out the frame", "crawler import Crawler class FrameTimesCrawler (Crawler): # The FrameTimesCrawler navigates through the backup", "log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif') self.destination_dir =", "dict for now. self.logger.write(f\"Extracting run info\") time_dict = {} n_paths = len(paths) print_perc", "# collects all the image names. The names are timestamps for when images", "names. The names are timestamps for when images # where taken, therefore can", "key = (exp_code, step, period) if key in time_dict: time_dict[key].append(np.float(time_str)) else: time_dict[key] =", "# where taken, therefore can be used to figure out the frame rate", "self.collect_names(verbose_file_list=False) print() paths = self.file_list # Get the run parameters and frame times.", "times_filepath = os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt') npy_path", "self.logger.write(f\"Writing times files\") self.logger.increase_global_indent() npy_paths = [] for key in time_dict: (exp_code, step,", "existing_paths = fid.read().splitlines() self.logger.write(f\"Adding {len(time_dict)} files to {len(existing_paths)} existing files.\") npy_paths = list(filter(None,", "+= 0.1 _, exp_code, step, period, file = nsplit(path, 4) time_str, ext =", 
"destination_dir, log_filepath=\"./log-files/frame-name-crawler.txt\"): logger = Logger(log_filepath, default_verbose=True) Crawler.__init__(self, logger) self.mode_dict['collect_frame_times'] = self.collect_frame_times self.set_target_names('*.tif') self.destination_dir", "time_dict[key].append(np.float(time_str)) else: time_dict[key] = [np.float(time_str)] self.logger.write(f\"Writing times files\") self.logger.increase_global_indent() npy_paths = [] for", "npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt') npy_path = Path(npy_list_filepath) if npy_path.is_file(): # Add to existing", "self.logger.write(f\"Found {len(times)} images for {exp_code} {step} {period}\") times_filename = f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir,", "import ensure_dir_exists from logger import Logger from crawler import Crawler class FrameTimesCrawler (Crawler):", "= f\"{exp_code}_{step}_{period}_frame_times.npy\" times_filepath = os.path.join(self.destination_dir, times_filename) np.save(times_filepath, times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir,", "From Helpyr import data_loading from helpyr_misc import nsplit from helpyr_misc import ensure_dir_exists from", "as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__ == \"__main__\": destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times' crawler =", "times) npy_paths.append(times_filepath) self.logger.decrease_global_indent() npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt') npy_path = Path(npy_list_filepath) if npy_path.is_file(): #", "taken, therefore can be used to figure out the frame rate for any", "import data_loading from helpyr_misc import nsplit from helpyr_misc import ensure_dir_exists from logger import", "self.destination_dir = destination_dir ensure_dir_exists(destination_dir, 
self.logger) def end(self): Crawler.end(self) self.logger.end_output() def collect_frame_times(self): self.collect_names(verbose_file_list=False) print()", "else: self.logger.write(f\"{len(time_dict)} files written\") with npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if __name__ ==", "collects all the image names. The names are timestamps for when images #", "p: print(f\"{p:4.0%}\", end='\\r') i_tracker = 0 for i, path in enumerate(paths): if (i", "logger import Logger from crawler import Crawler class FrameTimesCrawler (Crawler): # The FrameTimesCrawler", "| set(npy_paths))) else: self.logger.write(f\"{len(time_dict)} files written\") with npy_path.open('w') as fid: fid.write('\\n'.join(npy_paths)) self.logger.write(\"Done!\") if", "<gh_stars>0 import os import numpy as np from pathlib import Path # From", "class FrameTimesCrawler (Crawler): # The FrameTimesCrawler navigates through the backup data drives and" ]
[ "padding_symbol_id=word2id['[#]']) saver = tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self, question): # Pass question to", "The search is performed across the threads with a given tag. \"\"\" thread_ids,", "question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread = pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine' ) return thread_ids[best_thread[0]]", "embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver = tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self,", "most similar thread for the question. The search is performed across the threads", "from chatterbot.trainers import ListTrainer from chatbot import * from utils import * import", "Returns id of the most similar thread for the question. The search is", "resources...\") self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id),", "for the question. The search is performed across the threads with a given", "question): # Pass question to chitchat_bot to generate a response. response = self.model.get_response(self.sess,", "return thread_ids[best_thread[0]] class DialogueManager(object): def __init__(self, paths): print(\"Loading resources...\") self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options", "with a given tag. \"\"\" thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name) # HINT: you have", "import ListTrainer from chatbot import * from utils import * import tensorflow as", "similar thread for the question. 
The search is performed across the threads with", "self.__load_embeddings_by_tag(tag_name) # HINT: you have already implemented a similar routine in the 3rd", "you have already implemented a similar routine in the 3rd assignment. question_vec =", "= question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread = pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine' ) return", "= tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self, question): # Pass question to chitchat_bot to", "import * from utils import * import tensorflow as tf class ThreadRanker(object): def", "question, tag_name): \"\"\" Returns id of the most similar thread for the question.", "= paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\") thread_ids, thread_embeddings", "chatterbot import ChatBot from chatterbot.trainers import ListTrainer from chatbot import * from utils", "the threads with a given tag. \"\"\" thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name) # HINT:", "os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\") thread_ids, thread_embeddings = unpickle_file(embeddings_path) return thread_ids, thread_embeddings def get_best_thread(self,", "paths): print(\"Loading resources...\") self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model", "routine in the 3rd assignment. 
question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread = pairwise_distances_argmin(", "embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\") thread_ids, thread_embeddings = unpickle_file(embeddings_path) return thread_ids, thread_embeddings", "X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine' ) return thread_ids[best_thread[0]] class DialogueManager(object): def __init__(self, paths): print(\"Loading", "saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self, question): # Pass question to chitchat_bot to generate a", "paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\") thread_ids, thread_embeddings =", "thread_embeddings def get_best_thread(self, question, tag_name): \"\"\" Returns id of the most similar thread", "ChatBot from chatterbot.trainers import ListTrainer from chatbot import * from utils import *", "tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver = tf.train.Saver()", "__init__(self, paths): print(\"Loading resources...\") self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))", "tag_name + \".pkl\") thread_ids, thread_embeddings = unpickle_file(embeddings_path) return thread_ids, thread_embeddings def get_best_thread(self, question,", "def __init__(self, paths): print(\"Loading resources...\") self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess =", "start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], 
padding_symbol_id=word2id['[#]']) saver = tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self, question): # Pass", "Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver = tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def", "similar routine in the 3rd assignment. question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread =", "ThreadRanker(object): def __init__(self, paths): self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self,", "the 3rd assignment. question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread = pairwise_distances_argmin( X=question_vec.reshape(1, -1),", "thread_embeddings = self.__load_embeddings_by_tag(tag_name) # HINT: you have already implemented a similar routine in", "create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20,", "self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver", "+ \".pkl\") thread_ids, thread_embeddings = unpickle_file(embeddings_path) return thread_ids, thread_embeddings def get_best_thread(self, question, tag_name):", "thread for the question. The search is performed across the threads with a", "def generate_answer(self, question): # Pass question to chitchat_bot to generate a response. 
response", "from utils import * import tensorflow as tf class ThreadRanker(object): def __init__(self, paths):", "-1), Y=thread_embeddings, metric='cosine' ) return thread_ids[best_thread[0]] class DialogueManager(object): def __init__(self, paths): print(\"Loading resources...\")", "in the 3rd assignment. question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread = pairwise_distances_argmin( X=question_vec.reshape(1,", "= os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\") thread_ids, thread_embeddings = unpickle_file(embeddings_path) return thread_ids, thread_embeddings def", "DialogueManager(object): def __init__(self, paths): print(\"Loading resources...\") self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess", "thread_ids[best_thread[0]] class DialogueManager(object): def __init__(self, paths): print(\"Loading resources...\") self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options =", "self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\") thread_ids,", "tag_name): \"\"\" Returns id of the most similar thread for the question. The", "gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'],", "tag. 
\"\"\" thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name) # HINT: you have already implemented a", "self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300,", "unpickle_file(embeddings_path) return thread_ids, thread_embeddings def get_best_thread(self, question, tag_name): \"\"\" Returns id of the", "return thread_ids, thread_embeddings def get_best_thread(self, question, tag_name): \"\"\" Returns id of the most", "get_best_thread(self, question, tag_name): \"\"\" Returns id of the most similar thread for the", "best_thread = pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine' ) return thread_ids[best_thread[0]] class DialogueManager(object): def", "= tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'],", "implemented a similar routine in the 3rd assignment. question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim)", "a similar routine in the 3rd assignment. 
question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread", ") return thread_ids[best_thread[0]] class DialogueManager(object): def __init__(self, paths): print(\"Loading resources...\") self.create_chitchat_bot() def create_chitchat_bot(self):", "self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver = tf.train.Saver() saver.restore(self.sess,", "= unpickle_file(embeddings_path) return thread_ids, thread_embeddings def get_best_thread(self, question, tag_name): \"\"\" Returns id of", "import tensorflow as tf class ThreadRanker(object): def __init__(self, paths): self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS'])", "saver = tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self, question): # Pass question to chitchat_bot", "thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name) # HINT: you have already implemented a similar routine", "class DialogueManager(object): def __init__(self, paths): print(\"Loading resources...\") self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)", "question to chitchat_bot to generate a response. response = self.model.get_response(self.sess, question) return response", "question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread = pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine' )", "across the threads with a given tag. \"\"\" thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name) #", "HINT: you have already implemented a similar routine in the 3rd assignment. 
question_vec", "def __load_embeddings_by_tag(self, tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\") thread_ids, thread_embeddings = unpickle_file(embeddings_path)", "= pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine' ) return thread_ids[best_thread[0]] class DialogueManager(object): def __init__(self,", "generate_answer(self, question): # Pass question to chitchat_bot to generate a response. response =", "Y=thread_embeddings, metric='cosine' ) return thread_ids[best_thread[0]] class DialogueManager(object): def __init__(self, paths): print(\"Loading resources...\") self.create_chitchat_bot()", "utils import * import tensorflow as tf class ThreadRanker(object): def __init__(self, paths): self.word_embeddings,", "= Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver = tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691')", "tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self, question): # Pass question to chitchat_bot to generate", "= self.__load_embeddings_by_tag(tag_name) # HINT: you have already implemented a similar routine in the", "import * import tensorflow as tf class ThreadRanker(object): def __init__(self, paths): self.word_embeddings, self.embeddings_dim", "tf class ThreadRanker(object): def __init__(self, paths): self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER']", "the question. 
The search is performed across the threads with a given tag.", "\"\"\" thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name) # HINT: you have already implemented a similar", "thread_ids, thread_embeddings = unpickle_file(embeddings_path) return thread_ids, thread_embeddings def get_best_thread(self, question, tag_name): \"\"\" Returns", "class ThreadRanker(object): def __init__(self, paths): self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def", "the most similar thread for the question. The search is performed across the", "Pass question to chitchat_bot to generate a response. response = self.model.get_response(self.sess, question) return", "import ChatBot from chatterbot.trainers import ListTrainer from chatbot import * from utils import", "of the most similar thread for the question. The search is performed across", "sklearn.metrics.pairwise import pairwise_distances_argmin from chatterbot import ChatBot from chatterbot.trainers import ListTrainer from chatbot", "pairwise_distances_argmin from chatterbot import ChatBot from chatterbot.trainers import ListTrainer from chatbot import *", "load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\")", "tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\") thread_ids, thread_embeddings = unpickle_file(embeddings_path) return thread_ids,", "from chatbot import * from utils import * import tensorflow as tf class", "os from sklearn.metrics.pairwise import pairwise_distances_argmin from chatterbot import ChatBot from chatterbot.trainers import ListTrainer", "from chatterbot import ChatBot from chatterbot.trainers import ListTrainer from chatbot import * from", "import 
pairwise_distances_argmin from chatterbot import ChatBot from chatterbot.trainers import ListTrainer from chatbot import", "import os from sklearn.metrics.pairwise import pairwise_distances_argmin from chatterbot import ChatBot from chatterbot.trainers import", "ListTrainer from chatbot import * from utils import * import tensorflow as tf", "metric='cosine' ) return thread_ids[best_thread[0]] class DialogueManager(object): def __init__(self, paths): print(\"Loading resources...\") self.create_chitchat_bot() def", "print(\"Loading resources...\") self.create_chitchat_bot() def create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model =", "search is performed across the threads with a given tag. \"\"\" thread_ids, thread_embeddings", "def get_best_thread(self, question, tag_name): \"\"\" Returns id of the most similar thread for", "a given tag. \"\"\" thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name) # HINT: you have already", "chatterbot.trainers import ListTrainer from chatbot import * from utils import * import tensorflow", "def __init__(self, paths): self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name):", "given tag. 
\"\"\" thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name) # HINT: you have already implemented", "self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name", "__load_embeddings_by_tag(self, tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + \".pkl\") thread_ids, thread_embeddings = unpickle_file(embeddings_path) return", "self.embeddings_dim) best_thread = pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine' ) return thread_ids[best_thread[0]] class DialogueManager(object):", "tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]'])", "hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver = tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self, question):", "from sklearn.metrics.pairwise import pairwise_distances_argmin from chatterbot import ChatBot from chatterbot.trainers import ListTrainer from", "have already implemented a similar routine in the 3rd assignment. 
question_vec = question_to_vec(question,", "max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver = tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self, question): #", "__init__(self, paths): self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name): embeddings_path", "# Pass question to chitchat_bot to generate a response. response = self.model.get_response(self.sess, question)", "is performed across the threads with a given tag. \"\"\" thread_ids, thread_embeddings =", "thread_ids, thread_embeddings def get_best_thread(self, question, tag_name): \"\"\" Returns id of the most similar", "performed across the threads with a given tag. \"\"\" thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name)", "assignment. question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread = pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine'", "self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder,", "3rd assignment. question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim) best_thread = pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings,", "already implemented a similar routine in the 3rd assignment. question_vec = question_to_vec(question, self.word_embeddings,", "question. The search is performed across the threads with a given tag. 
\"\"\"", "* import tensorflow as tf class ThreadRanker(object): def __init__(self, paths): self.word_embeddings, self.embeddings_dim =", "= tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128, max_iter=20, start_symbol_id=word2id['[^]'], end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver =", "tensorflow as tf class ThreadRanker(object): def __init__(self, paths): self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder", "id of the most similar thread for the question. The search is performed", "# HINT: you have already implemented a similar routine in the 3rd assignment.", "self.word_embeddings, self.embeddings_dim) best_thread = pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine' ) return thread_ids[best_thread[0]] class", "'checkpoints/model_four_691') def generate_answer(self, question): # Pass question to chitchat_bot to generate a response.", "def create_chitchat_bot(self): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.model = Seq2SeqModel(vocab_size=len(word2id), embeddings_size=300, hidden_size=128,", "paths): self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name): embeddings_path =", "\"\"\" Returns id of the most similar thread for the question. 
The search", "pairwise_distances_argmin( X=question_vec.reshape(1, -1), Y=thread_embeddings, metric='cosine' ) return thread_ids[best_thread[0]] class DialogueManager(object): def __init__(self, paths):", "end_symbol_id=word2id['[$]'], padding_symbol_id=word2id['[#]']) saver = tf.train.Saver() saver.restore(self.sess, 'checkpoints/model_four_691') def generate_answer(self, question): # Pass question", "* from utils import * import tensorflow as tf class ThreadRanker(object): def __init__(self,", "threads with a given tag. \"\"\" thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name) # HINT: you", "thread_embeddings = unpickle_file(embeddings_path) return thread_ids, thread_embeddings def get_best_thread(self, question, tag_name): \"\"\" Returns id", "as tf class ThreadRanker(object): def __init__(self, paths): self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder =", "= load_embeddings(paths['WORD_EMBEDDINGS']) self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER'] def __load_embeddings_by_tag(self, tag_name): embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name +", "chatbot import * from utils import * import tensorflow as tf class ThreadRanker(object):", "\".pkl\") thread_ids, thread_embeddings = unpickle_file(embeddings_path) return thread_ids, thread_embeddings def get_best_thread(self, question, tag_name): \"\"\"" ]
[ "return def main(_): utils.set_gpus_to_use() try: import tensorvision.train import tensorflow_fcn.utils except ImportError: logging.error(\"Could not", "TV_DIR_RUNS/debug, ' 'hence it will get overwritten by further runs.')) else: tf.app.flags.DEFINE_boolean( 'save',", "it will get overwritten by further runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"Trims weights on a pruned model.\"\"\"", "sess = tf.Session() saver = tf.train.Saver() core.load_weights(trim_dir, sess, saver) for weight in tf.contrib.model_pruning.get_masks():", "FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): # build the graph based on the", "sess = tv_sess['sess'] saver = tv_sess['saver'] cur_step = core.load_weights(logdir, sess, saver) if cur_step", "will get overwritten by further runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save", "save the run. In case --nosave (default) ' 'output will be saved to", "that no weights have been loaded.\") logging.warning(\"Starting Training with step 0.\") cur_step =", "tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2)) toss_kernels", "' 'output will be saved to the folder TV_DIR_RUNS/debug ' 'hence it will", "'train') tv_graph = core.build_training_graph(hypes, queue, modules) # prepare the tv session with tf.Session().as_default():", "# -*- coding: utf-8 -*- \"\"\"Trims weights on a pruned model.\"\"\" from __future__", "<reponame>watsoncm/PruneSeg #!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"Trims weights on a pruned", "python # -*- coding: utf-8 -*- \"\"\"Trims weights on a pruned model.\"\"\" from", "= core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl'] = image_pl tv_graph['inf_out'] = inf_out # prepaire the", "None, 'Append a name Tag to run.') if 'TV_SAVE' in 
os.environ and os.environ['TV_SAVE']:", "= int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2)) toss_kernels = l1_values.argsort()[:kernel_count]", "os.path.exists(logdir): # weights are downloaded. Nothing to do return if not FLAGS.RUN ==", "hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value), axis=(0,", "# prepaire the tv session image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1,", "np.sum(np.abs(weight_value), axis=(0, 1, 2)) toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:, :, :, toss_kernels] = 0", "tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir, 'model.ckpt') tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step) train.continue_training(trim_dir) if __name__ == '__main__':", "'Append a name Tag to run.') if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean(", "runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir):", "= np.sum(np.abs(weight_value), axis=(0, 1, 2)) toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:, :, :, toss_kernels] =", "to run.') flags.DEFINE_string('project', None, 'Append a name Tag to run.') if 'TV_SAVE' in", "further runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. 
In case", "0 assign_op = tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir, 'model.ckpt') tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step)", "import the submodules.\") logging.error(\"Please execute:\" \"'git submodule update --init --recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes,", "flags.DEFINE_string('project', None, 'Append a name Tag to run.') if 'TV_SAVE' in os.environ and", "queue = modules['input'].create_queues(hypes, 'train') tv_graph = core.build_training_graph(hypes, queue, modules) # prepare the tv", "Create a session for running Ops on the Graph. trim_dir = 'RUNS/trimmed' shutil.copytree(logdir,", "__future__ import absolute_import from __future__ import division from __future__ import print_function import json", "tensorvision.analyze as ana import tensorvision.utils as utils import tensorvision.core as core from evaluation", "on a pruned model.\"\"\" from __future__ import absolute_import from __future__ import division from", "to run.') if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to", "import sys import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np import tensorflow as", "exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info(\"f: %s\", f) hypes = json.load(f) utils.load_plugins()", "# Create a session for running Ops on the Graph. 
trim_dir = 'RUNS/trimmed'", "import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np import tensorflow as tf flags", "3]) inf_out = core.build_inference_graph(hypes, modules, image=image) # Create a session for running Ops", "os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else: runs_dir = 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\")", "= inf_out # prepaire the tv session image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl,", "toss_kernels] = 0 assign_op = tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir, 'model.ckpt') tv_sess['saver'].save(sess,", "\"'git submodule update --init --recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info(\"f: %s\",", "np import tensorflow as tf flags = tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1, 'incl')", "0) image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) # Create a", "if not FLAGS.RUN == 'KittiSeg_pretrained': return import zipfile download_name = utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting", "= 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir = os.path.join(runs_dir, FLAGS.RUN)", "toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:, :, :, toss_kernels] = 0 assign_op = tf.assign(weight, tf.constant(weight_value))", "Tag to run.') if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether", "if 'TV_DIR_RUNS' in os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else: runs_dir = 
'RUNS' utils.set_dirs(hypes,", "tf.Graph().as_default(): # build the graph based on the loaded modules with tf.name_scope(\"Queues\"): queue", "None: logging.warning(\"Loaded global_step is None.\") logging.warning(\"This could mean,\" \" that no weights have", "'hence it will get overwritten by further runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether", "import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing model", "image=image) tv_graph['image_pl'] = image_pl tv_graph['inf_out'] = inf_out # prepaire the tv session image_pl", "tv_graph['image_pl'] = image_pl tv_graph['inf_out'] = inf_out # prepaire the tv session image_pl =", "import zipfile download_name = utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def main(_):", "os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. In case --nosave (default)", "FLAGS = flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train as train import tensorvision.analyze as ana", "In case --nosave (default) ' 'output will be saved to the folder TV_DIR_RUNS/debug", "\"\"\"Trims weights on a pruned model.\"\"\" from __future__ import absolute_import from __future__ import", "logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def main(_): utils.set_gpus_to_use() try: import tensorvision.train import tensorflow_fcn.utils", "'TV_DIR_RUNS' in os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else: runs_dir = 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)", "Ops on the Graph. 
trim_dir = 'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files', 'hypes.json'))", "saver) if cur_step is None: logging.warning(\"Loaded global_step is None.\") logging.warning(\"This could mean,\" \"", "for weight in tf.contrib.model_pruning.get_masks(): if any([layer in weight.name for layer in hypes['layer_pruning']['layers']]): weight_value", "shutil import sys import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np import tensorflow", "flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing model parameters.') flags.DEFINE_string('name',", "KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def main(_): utils.set_gpus_to_use() try: import tensorvision.train import tensorflow_fcn.utils except", "os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. 
In case", "-*- coding: utf-8 -*- \"\"\"Trims weights on a pruned model.\"\"\" from __future__ import", "name Tag to run.') if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True,", "l1_values.argsort()[:kernel_count] weight_value[:, :, :, toss_kernels] = 0 assign_op = tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path", "' 'hence it will get overwritten by further runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\")", "logging.warning(\"This could mean,\" \" that no weights have been loaded.\") logging.warning(\"Starting Training with", "runs_dir = 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir = os.path.join(runs_dir,", "the folder TV_DIR_RUNS/debug, ' 'hence it will get overwritten by further runs.')) else:", "def main(_): utils.set_gpus_to_use() try: import tensorvision.train import tensorflow_fcn.utils except ImportError: logging.error(\"Could not import", "= core.build_training_graph(hypes, queue, modules) # prepare the tv session with tf.Session().as_default(): tv_sess =", "step 0.\") cur_step = 0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32) image =", "tf flags = tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train as train", "os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir): # weights are downloaded. Nothing to do return if", "as ana import tensorvision.utils as utils import tensorvision.core as core from evaluation import", "3]) inf_out = core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl'] = image_pl tv_graph['inf_out'] = inf_out #", "the run. 
In case --nosave (default) ' 'output will be saved to the", "coding: utf-8 -*- \"\"\"Trims weights on a pruned model.\"\"\" from __future__ import absolute_import", "'hypes.json')) sess = tf.Session() saver = tf.train.Saver() core.load_weights(trim_dir, sess, saver) for weight in", "if os.path.exists(logdir): # weights are downloaded. Nothing to do return if not FLAGS.RUN", "# https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np import tensorflow as tf flags = tf.app.flags", "name Tag to run.') flags.DEFINE_string('project', None, 'Append a name Tag to run.') if", "# build the graph based on the loaded modules with tf.name_scope(\"Queues\"): queue =", "no weights have been loaded.\") logging.warning(\"Starting Training with step 0.\") cur_step = 0", "utils import tensorvision.core as core from evaluation import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for", "= json.load(f) utils.load_plugins() if 'TV_DIR_RUNS' in os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else: runs_dir", "logging import os import shutil import sys import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy", "segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir): #", "None, 'Append a name Tag to run.') flags.DEFINE_string('project', None, 'Append a name Tag", "--nosave (default) ' 'output will be saved to the folder TV_DIR_RUNS/debug ' 'hence", "tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes, 'train') tv_graph = core.build_training_graph(hypes, queue, modules) # prepare the", "if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the", "tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) 
inf_out = core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl'] =", "utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir = os.path.join(runs_dir, FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes) with", "import numpy as np import tensorflow as tf flags = tf.app.flags FLAGS =", "is None: logging.warning(\"Loaded global_step is None.\") logging.warning(\"This could mean,\" \" that no weights", "running Ops on the Graph. trim_dir = 'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files',", "trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files', 'hypes.json')) sess = tf.Session() saver = tf.train.Saver() core.load_weights(trim_dir, sess,", "import division from __future__ import print_function import json import logging import os import", "core.build_training_graph(hypes, queue, modules) # prepare the tv session with tf.Session().as_default(): tv_sess = core.start_tv_session(hypes)", "run.') if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save", "'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files', 'hypes.json')) sess = tf.Session() saver = tf.train.Saver()", "os.path.join(runs_dir, FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): # build the graph based on", "= image_pl tv_graph['inf_out'] = inf_out # prepaire the tv session image_pl = tf.placeholder(tf.float32)", "weight in tf.contrib.model_pruning.get_masks(): if any([layer in weight.name for layer in hypes['layer_pruning']['layers']]): weight_value =", "any([layer in weight.name for layer in hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3]", "not 
FLAGS.RUN == 'KittiSeg_pretrained': return import zipfile download_name = utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\")", "downloaded. Nothing to do return if not FLAGS.RUN == 'KittiSeg_pretrained': return import zipfile", "= 0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1,", "' 'output will be saved to the folder TV_DIR_RUNS/debug, ' 'hence it will", "import tensorflow as tf flags = tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1, 'incl') import", "import os import shutil import sys import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as", "'save', True, ('Whether to save the run. In case --nosave (default) ' 'output", "zipfile download_name = utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def main(_): utils.set_gpus_to_use()", "= tf.Session() saver = tf.train.Saver() core.load_weights(trim_dir, sess, saver) for weight in tf.contrib.model_pruning.get_masks(): if", "case --nosave (default) ' 'output will be saved to the folder TV_DIR_RUNS/debug '", "download_name = utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def main(_): utils.set_gpus_to_use() try:", "== 'KittiSeg_pretrained': return import zipfile download_name = utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir)", "1, 2)) toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:, :, :, toss_kernels] = 0 assign_op =", "shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 
'model_files', 'hypes.json')) sess = tf.Session() saver = tf.train.Saver() core.load_weights(trim_dir,", "as tf flags = tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train as", "tensorvision.train as train import tensorvision.analyze as ana import tensorvision.utils as utils import tensorvision.core", "cur_step = core.load_weights(logdir, sess, saver) if cur_step is None: logging.warning(\"Loaded global_step is None.\")", "l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2)) toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:, :, :, toss_kernels]", "is None.\") logging.warning(\"This could mean,\" \" that no weights have been loaded.\") logging.warning(\"Starting", "'hypes/KittiSeg.json', 'File storing model parameters.') flags.DEFINE_string('name', None, 'Append a name Tag to run.')", "True, ('Whether to save the run. In case --nosave (default) ' 'output will", "axis=(0, 1, 2)) toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:, :, :, toss_kernels] = 0 assign_op", "weight_value[:, :, :, toss_kernels] = 0 assign_op = tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path =", "open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info(\"f: %s\", f) hypes = json.load(f) utils.load_plugins() if 'TV_DIR_RUNS'", "= modules['input'].create_queues(hypes, 'train') tv_graph = core.build_training_graph(hypes, queue, modules) # prepare the tv session", "utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def main(_): utils.set_gpus_to_use() try: import tensorvision.train", "0.\") cur_step = 0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl,", "the tv session image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None, None,", 
"cur_step is None: logging.warning(\"Loaded global_step is None.\") logging.warning(\"This could mean,\" \" that no", "= flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train as train import tensorvision.analyze as ana import", "return import zipfile download_name = utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def", "= tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir, 'model.ckpt') tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step) train.continue_training(trim_dir) if", "in os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else: runs_dir = 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes)", "from __future__ import print_function import json import logging import os import shutil import", "by further runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir, FLAGS.RUN)", "checkpoint_path = os.path.join(trim_dir, 'model.ckpt') tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step) train.continue_training(trim_dir) if __name__ == '__main__': tf.app.run()", "'output will be saved to the folder TV_DIR_RUNS/debug ' 'hence it will get", "evaluation import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing", "do return if not FLAGS.RUN == 'KittiSeg_pretrained': return import zipfile download_name = utils.download(segmentation_weights_url,", ":, :, toss_kernels] = 0 assign_op = tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir,", "runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') 
else: runs_dir = 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir)", "tv session image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3])", "prepaire the tv session image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None,", "inf_out # prepaire the tv session image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0)", "= tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) #", "= core.build_inference_graph(hypes, modules, image=image) # Create a session for running Ops on the", "get overwritten by further runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the", "modules with tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes, 'train') tv_graph = core.build_training_graph(hypes, queue, modules) #", "overwritten by further runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run.", "else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. 
In case --nosave (default)", "sess, saver) for weight in tf.contrib.model_pruning.get_masks(): if any([layer in weight.name for layer in", "import logging import os import shutil import sys import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import", "try: import tensorvision.train import tensorflow_fcn.utils except ImportError: logging.error(\"Could not import the submodules.\") logging.error(\"Please", "with step 0.\") cur_step = 0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32) image", "pruned model.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import", "tv_graph = core.build_training_graph(hypes, queue, modules) # prepare the tv session with tf.Session().as_default(): tv_sess", "FLAGS.RUN == 'KittiSeg_pretrained': return import zipfile download_name = utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name,", "a name Tag to run.') flags.DEFINE_string('project', None, 'Append a name Tag to run.')", "tensorflow as tf flags = tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train", "tv_graph['inf_out'] = inf_out # prepaire the tv session image_pl = tf.placeholder(tf.float32) image =", "'KittiSeg_pretrained': return import zipfile download_name = utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return", "%s\", f) hypes = json.load(f) utils.load_plugins() if 'TV_DIR_RUNS' in os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],", "In case --nosave (default) ' 'output will be saved to the folder TV_DIR_RUNS/debug,", "with tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes, 'train') tv_graph = core.build_training_graph(hypes, queue, modules) # prepare", "get 
overwritten by further runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir =", "be saved to the folder TV_DIR_RUNS/debug, ' 'hence it will get overwritten by", "= tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train as train import tensorvision.analyze", "core.build_inference_graph(hypes, modules, image=image) # Create a session for running Ops on the Graph.", "folder TV_DIR_RUNS/debug ' 'hence it will get overwritten by further runs.')) segmentation_weights_url =", ":, toss_kernels] = 0 assign_op = tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir, 'model.ckpt')", "to the folder TV_DIR_RUNS/debug ' 'hence it will get overwritten by further runs.'))", "json import logging import os import shutil import sys import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070", "= core.load_weights(logdir, sess, saver) if cur_step is None: logging.warning(\"Loaded global_step is None.\") logging.warning(\"This", "submodule update --init --recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info(\"f: %s\", f)", "sys.path.insert(1, 'incl') import tensorvision.train as train import tensorvision.analyze as ana import tensorvision.utils as", "case --nosave (default) ' 'output will be saved to the folder TV_DIR_RUNS/debug, '", "core from evaluation import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json',", "(default) ' 'output will be saved to the folder TV_DIR_RUNS/debug, ' 'hence it", "numpy as np import tensorflow as tf flags = tf.app.flags FLAGS = flags.FLAGS", "core.load_weights(logdir, sess, saver) if cur_step is None: logging.warning(\"Loaded global_step is None.\") logging.warning(\"This could", "the graph based on 
the loaded modules with tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes, 'train')", "flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train as train import tensorvision.analyze as ana import tensorvision.utils", "run. In case --nosave (default) ' 'output will be saved to the folder", "tv_sess['sess'] saver = tv_sess['saver'] cur_step = core.load_weights(logdir, sess, saver) if cur_step is None:", "'r') as f: logging.info(\"f: %s\", f) hypes = json.load(f) utils.load_plugins() if 'TV_DIR_RUNS' in", "import json import logging import os import shutil import sys import collections #", "the submodules.\") logging.error(\"Please execute:\" \"'git submodule update --init --recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r')", "modules, image=image) tv_graph['image_pl'] = image_pl tv_graph['inf_out'] = inf_out # prepaire the tv session", "2)) toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:, :, :, toss_kernels] = 0 assign_op = tf.assign(weight,", "will be saved to the folder TV_DIR_RUNS/debug, ' 'hence it will get overwritten", "have been loaded.\") logging.warning(\"Starting Training with step 0.\") cur_step = 0 with tf.name_scope('Validation'):", "weights.\") logdir = os.path.join(runs_dir, FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): # build the", "session image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) inf_out", "image = tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image)", "'incl') import tensorvision.train as train import tensorvision.analyze as ana import tensorvision.utils as utils", "on the loaded modules with tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes, 'train') tv_graph = core.build_training_graph(hypes,", "tf.train.Saver() core.load_weights(trim_dir, sess, saver) for weight in 
tf.contrib.model_pruning.get_masks(): if any([layer in weight.name for", "= os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir): # weights are downloaded. Nothing to do return", "modules['input'].create_queues(hypes, 'train') tv_graph = core.build_training_graph(hypes, queue, modules) # prepare the tv session with", "are downloaded. Nothing to do return if not FLAGS.RUN == 'KittiSeg_pretrained': return import", "os.path.join(trim_dir, 'model_files', 'hypes.json')) sess = tf.Session() saver = tf.train.Saver() core.load_weights(trim_dir, sess, saver) for", "model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing model parameters.') flags.DEFINE_string('name', None, 'Append a name", "--nosave (default) ' 'output will be saved to the folder TV_DIR_RUNS/debug, ' 'hence", "https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np import tensorflow as tf flags = tf.app.flags FLAGS", "execute:\" \"'git submodule update --init --recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info(\"f:", "'TV_SAVE' in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run.", "from __future__ import division from __future__ import print_function import json import logging import", "the tv session with tf.Session().as_default(): tv_sess = core.start_tv_session(hypes) sess = tv_sess['sess'] saver =", "as f: logging.info(\"f: %s\", f) hypes = json.load(f) utils.load_plugins() if 'TV_DIR_RUNS' in os.environ:", "tensorvision.core as core from evaluation import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for model parameters.')", "= os.path.join(runs_dir, FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): # build the graph based", "weight.name for layer in hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3] * 
hypes['layer_pruning']['layer_sparsity'])", "queue, modules) # prepare the tv session with tf.Session().as_default(): tv_sess = core.start_tv_session(hypes) sess", "utils.set_gpus_to_use() try: import tensorvision.train import tensorflow_fcn.utils except ImportError: logging.error(\"Could not import the submodules.\")", "runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. In case --nosave", "kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing model parameters.')", "tv_sess = core.start_tv_session(hypes) sess = tv_sess['sess'] saver = tv_sess['saver'] cur_step = core.load_weights(logdir, sess,", "for running Ops on the Graph. trim_dir = 'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir,", "logging.error(\"Could not import the submodules.\") logging.error(\"Please execute:\" \"'git submodule update --init --recursive'\") exit(1)", "return if not FLAGS.RUN == 'KittiSeg_pretrained': return import zipfile download_name = utils.download(segmentation_weights_url, runs_dir)", "except ImportError: logging.error(\"Could not import the submodules.\") logging.error(\"Please execute:\" \"'git submodule update --init", "import tensorflow_fcn.utils except ImportError: logging.error(\"Could not import the submodules.\") logging.error(\"Please execute:\" \"'git submodule", "loaded.\") logging.warning(\"Starting Training with step 0.\") cur_step = 0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl", "graph based on the loaded modules with tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes, 'train') tv_graph", "logging.info(\"Trimming weights.\") logdir = os.path.join(runs_dir, FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): # build", "= tf.placeholder(tf.float32) image = 
tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes,", "global_step is None.\") logging.warning(\"This could mean,\" \" that no weights have been loaded.\")", "'Append a name Tag to run.') flags.DEFINE_string('project', None, 'Append a name Tag to", "logdir = os.path.join(runs_dir, FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): # build the graph", "None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl'] = image_pl tv_graph['inf_out'] = inf_out", "= os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else: runs_dir = 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming", "the folder TV_DIR_RUNS/debug ' 'hence it will get overwritten by further runs.')) segmentation_weights_url", "-*- \"\"\"Trims weights on a pruned model.\"\"\" from __future__ import absolute_import from __future__", "as utils import tensorvision.core as core from evaluation import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier", "it will get overwritten by further runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir):", "could mean,\" \" that no weights have been loaded.\") logging.warning(\"Starting Training with step", "os import shutil import sys import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np", "zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def main(_): utils.set_gpus_to_use() try: import tensorvision.train import tensorflow_fcn.utils except ImportError:", "ana import tensorvision.utils as utils import tensorvision.core as core from evaluation import kitti_test", "'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) 
utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir = os.path.join(runs_dir, FLAGS.RUN) modules", "be saved to the folder TV_DIR_RUNS/debug ' 'hence it will get overwritten by", "'r').extractall(runs_dir) return def main(_): utils.set_gpus_to_use() try: import tensorvision.train import tensorflow_fcn.utils except ImportError: logging.error(\"Could", "= l1_values.argsort()[:kernel_count] weight_value[:, :, :, toss_kernels] = 0 assign_op = tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op)", "with tf.Graph().as_default(): # build the graph based on the loaded modules with tf.name_scope(\"Queues\"):", "= tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2))", "utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir = os.path.join(runs_dir, FLAGS.RUN) modules =", "maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir): # weights are downloaded. 
Nothing to", "if any([layer in weight.name for layer in hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight) kernel_count =", "tv_sess['saver'] cur_step = core.load_weights(logdir, sess, saver) if cur_step is None: logging.warning(\"Loaded global_step is", "will be saved to the folder TV_DIR_RUNS/debug ' 'hence it will get overwritten", "(\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir): # weights are", "with open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info(\"f: %s\", f) hypes = json.load(f) utils.load_plugins() if", "import tensorvision.analyze as ana import tensorvision.utils as utils import tensorvision.core as core from", "tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. In case --nosave (default) '", "maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir = os.path.join(runs_dir, FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): #", "on the Graph. 
trim_dir = 'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files', 'hypes.json')) sess", "in weight.name for layer in hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3] *", "layer in hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values =", "as np import tensorflow as tf flags = tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1,", "the loaded modules with tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes, 'train') tv_graph = core.build_training_graph(hypes, queue,", "0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None,", "loaded modules with tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes, 'train') tv_graph = core.build_training_graph(hypes, queue, modules)", "image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl'] = image_pl tv_graph['inf_out']", "further runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir, FLAGS.RUN) if", "'KittiSeg_pretrained', 'Modifier for model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing model parameters.') flags.DEFINE_string('name', None,", "= tv_sess['saver'] cur_step = core.load_weights(logdir, sess, saver) if cur_step is None: logging.warning(\"Loaded global_step", "' 'hence it will get overwritten by further runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True,", "'KittiSeg') else: runs_dir = 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) 
train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir", "tensorvision.train import tensorflow_fcn.utils except ImportError: logging.error(\"Could not import the submodules.\") logging.error(\"Please execute:\" \"'git", "weights have been loaded.\") logging.warning(\"Starting Training with step 0.\") cur_step = 0 with", "__future__ import division from __future__ import print_function import json import logging import os", "import absolute_import from __future__ import division from __future__ import print_function import json import", "tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train as train import tensorvision.analyze as", "None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) # Create a session for running", "tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir, 'model.ckpt') tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step) train.continue_training(trim_dir) if __name__ ==", "saved to the folder TV_DIR_RUNS/debug, ' 'hence it will get overwritten by further", "parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing model parameters.') flags.DEFINE_string('name', None, 'Append a name Tag", "__future__ import print_function import json import logging import os import shutil import sys", "sys import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np import tensorflow as tf", "modules = utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): # build the graph based on the loaded", "build the graph based on the loaded modules with tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes,", "in tf.contrib.model_pruning.get_masks(): if any([layer in weight.name for layer in hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight)", "kernel_count = 
int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2)) toss_kernels =", "a name Tag to run.') if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save',", "storing model parameters.') flags.DEFINE_string('name', None, 'Append a name Tag to run.') flags.DEFINE_string('project', None,", "not import the submodules.\") logging.error(\"Please execute:\" \"'git submodule update --init --recursive'\") exit(1) with", "to do return if not FLAGS.RUN == 'KittiSeg_pretrained': return import zipfile download_name =", "from evaluation import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File", "f: logging.info(\"f: %s\", f) hypes = json.load(f) utils.load_plugins() if 'TV_DIR_RUNS' in os.environ: runs_dir", "train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir = os.path.join(runs_dir, FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes) with tf.Graph().as_default():", "(default) ' 'output will be saved to the folder TV_DIR_RUNS/debug ' 'hence it", "session with tf.Session().as_default(): tv_sess = core.start_tv_session(hypes) sess = tv_sess['sess'] saver = tv_sess['saver'] cur_step", "core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl'] = image_pl tv_graph['inf_out'] = inf_out # prepaire the tv", "by further runs.')) else: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. 
In", "tf.contrib.model_pruning.get_masks(): if any([layer in weight.name for layer in hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight) kernel_count", "= core.start_tv_session(hypes) sess = tv_sess['sess'] saver = tv_sess['saver'] cur_step = core.load_weights(logdir, sess, saver)", "import tensorvision.train as train import tensorvision.analyze as ana import tensorvision.utils as utils import", "FLAGS.RUN) if os.path.exists(logdir): # weights are downloaded. Nothing to do return if not", "os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else: runs_dir = 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes)", "weights are downloaded. Nothing to do return if not FLAGS.RUN == 'KittiSeg_pretrained': return", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. In case --nosave", "in os.environ and os.environ['TV_SAVE']: tf.app.flags.DEFINE_boolean( 'save', True, ('Whether to save the run. 
In", "hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2)) toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:, :, :,", "modules) # prepare the tv session with tf.Session().as_default(): tv_sess = core.start_tv_session(hypes) sess =", "update --init --recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info(\"f: %s\", f) hypes", "mean,\" \" that no weights have been loaded.\") logging.warning(\"Starting Training with step 0.\")", "shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files', 'hypes.json')) sess = tf.Session() saver = tf.train.Saver() core.load_weights(trim_dir, sess, saver)", "'model_files', 'hypes.json')) sess = tf.Session() saver = tf.train.Saver() core.load_weights(trim_dir, sess, saver) for weight", "tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir, 'model.ckpt') tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step) train.continue_training(trim_dir) if __name__", "utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): # build the graph based on the loaded modules with", "with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None, None,", "= tf.train.Saver() core.load_weights(trim_dir, sess, saver) for weight in tf.contrib.model_pruning.get_masks(): if any([layer in weight.name", "a pruned model.\"\"\" from __future__ import absolute_import from __future__ import division from __future__", "ImportError: logging.error(\"Could not import the submodules.\") logging.error(\"Please execute:\" \"'git submodule update --init --recursive'\")", "logging.error(\"Please execute:\" \"'git submodule update --init --recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as f:", "flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing model 
parameters.') flags.DEFINE_string('name', None, 'Append a name Tag to", "the Graph. trim_dir = 'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files', 'hypes.json')) sess =", "cur_step = 0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0)", "model parameters.') flags.DEFINE_string('name', None, 'Append a name Tag to run.') flags.DEFINE_string('project', None, 'Append", "int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2)) toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:,", "weight_value = tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value), axis=(0, 1,", "a session for running Ops on the Graph. trim_dir = 'RUNS/trimmed' shutil.copytree(logdir, trim_dir)", "logging.warning(\"Loaded global_step is None.\") logging.warning(\"This could mean,\" \" that no weights have been", "* hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2)) toss_kernels = l1_values.argsort()[:kernel_count] weight_value[:, :,", "= utils.load_modules_from_hypes(hypes) with tf.Graph().as_default(): # build the graph based on the loaded modules", "tensorflow_fcn.utils except ImportError: logging.error(\"Could not import the submodules.\") logging.error(\"Please execute:\" \"'git submodule update", "utils.load_plugins() if 'TV_DIR_RUNS' in os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else: runs_dir = 'RUNS'", "logdir = os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir): # weights are downloaded. 
Nothing to do", "core.start_tv_session(hypes) sess = tv_sess['sess'] saver = tv_sess['saver'] cur_step = core.load_weights(logdir, sess, saver) if", "= tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl']", "tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) # Create", "inf_out = core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl'] = image_pl tv_graph['inf_out'] = inf_out # prepaire", "in hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values = np.sum(np.abs(weight_value),", "--init --recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info(\"f: %s\", f) hypes =", "weights on a pruned model.\"\"\" from __future__ import absolute_import from __future__ import division", "tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes, modules,", "tf.Session() saver = tf.train.Saver() core.load_weights(trim_dir, sess, saver) for weight in tf.contrib.model_pruning.get_masks(): if any([layer", "= (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir): # weights", "= utils.download(segmentation_weights_url, runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def main(_): utils.set_gpus_to_use() try: import", "= 'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files', 'hypes.json')) sess = tf.Session() saver =", "Graph. 
trim_dir = 'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files', 'hypes.json')) sess = tf.Session()", "to save the run. In case --nosave (default) ' 'output will be saved", "train import tensorvision.analyze as ana import tensorvision.utils as utils import tensorvision.core as core", "# weights are downloaded. Nothing to do return if not FLAGS.RUN == 'KittiSeg_pretrained':", "Tag to run.') flags.DEFINE_string('project', None, 'Append a name Tag to run.') if 'TV_SAVE'", "runs_dir) logging.info(\"Extracting KittiSeg_pretrained.zip\") zipfile.ZipFile(download_name, 'r').extractall(runs_dir) return def main(_): utils.set_gpus_to_use() try: import tensorvision.train import", "assign_op = tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir, 'model.ckpt') tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step) train.continue_training(trim_dir)", "for layer in hypes['layer_pruning']['layers']]): weight_value = tv_sess['sess'].run(weight) kernel_count = int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity']) l1_values", "been loaded.\") logging.warning(\"Starting Training with step 0.\") cur_step = 0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables()", "absolute_import from __future__ import division from __future__ import print_function import json import logging", "import tensorvision.utils as utils import tensorvision.core as core from evaluation import kitti_test flags.DEFINE_string('RUN',", "print_function import json import logging import os import shutil import sys import collections", "= tv_sess['sess'] saver = tv_sess['saver'] cur_step = core.load_weights(logdir, sess, saver) if cur_step is", "Training with step 0.\") cur_step = 0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32)", "based on the loaded modules with 
tf.name_scope(\"Queues\"): queue = modules['input'].create_queues(hypes, 'train') tv_graph =", "# prepare the tv session with tf.Session().as_default(): tv_sess = core.start_tv_session(hypes) sess = tv_sess['sess']", "None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl'] = image_pl tv_graph['inf_out'] =", "submodules.\") logging.error(\"Please execute:\" \"'git submodule update --init --recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as", "tv session with tf.Session().as_default(): tv_sess = core.start_tv_session(hypes) sess = tv_sess['sess'] saver = tv_sess['saver']", "'hence it will get overwritten by further runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def", "session for running Ops on the Graph. trim_dir = 'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes,", "image=image) # Create a session for running Ops on the Graph. 
trim_dir =", "0) image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) tv_graph['image_pl'] = image_pl", "image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) inf_out =", "run.') flags.DEFINE_string('project', None, 'Append a name Tag to run.') if 'TV_SAVE' in os.environ", "= 0 assign_op = tf.assign(weight, tf.constant(weight_value)) tv_sess['sess'].run(assign_op) checkpoint_path = os.path.join(trim_dir, 'model.ckpt') tv_sess['saver'].save(sess, checkpoint_path,", "trim_dir = 'RUNS/trimmed' shutil.copytree(logdir, trim_dir) shutil.copy(tf.app.flags.FLAGS.hypes, os.path.join(trim_dir, 'model_files', 'hypes.json')) sess = tf.Session() saver", "saver) for weight in tf.contrib.model_pruning.get_masks(): if any([layer in weight.name for layer in hypes['layer_pruning']['layers']]):", "model.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import print_function", "image.set_shape([1, None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) # Create a session", "flags = tf.app.flags FLAGS = flags.FLAGS sys.path.insert(1, 'incl') import tensorvision.train as train import", "as train import tensorvision.analyze as ana import tensorvision.utils as utils import tensorvision.core as", "saver = tf.train.Saver() core.load_weights(trim_dir, sess, saver) for weight in tf.contrib.model_pruning.get_masks(): if any([layer in", "overwritten by further runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir,", "image_pl tv_graph['inf_out'] = inf_out # prepaire the tv session image_pl = tf.placeholder(tf.float32) image", "if cur_step is None: logging.warning(\"Loaded global_step is None.\") logging.warning(\"This could mean,\" \" that", "import tensorvision.train import tensorflow_fcn.utils 
except ImportError: logging.error(\"Could not import the submodules.\") logging.error(\"Please execute:\"", "for model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing model parameters.') flags.DEFINE_string('name', None, 'Append a", "sess, saver) if cur_step is None: logging.warning(\"Loaded global_step is None.\") logging.warning(\"This could mean,\"", "tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3])", "as core from evaluation import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for model parameters.') flags.DEFINE_string('hypes',", "flags.DEFINE_string('name', None, 'Append a name Tag to run.') flags.DEFINE_string('project', None, 'Append a name", "tf.get_variable_scope().reuse_variables() image_pl = tf.placeholder(tf.float32) image = tf.expand_dims(image_pl, 0) image.set_shape([1, None, None, 3]) inf_out", "inf_out = core.build_inference_graph(hypes, modules, image=image) # Create a session for running Ops on", "saver = tv_sess['saver'] cur_step = core.load_weights(logdir, sess, saver) if cur_step is None: logging.warning(\"Loaded", "TV_DIR_RUNS/debug ' 'hence it will get overwritten by further runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\"", "\"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir): # weights are downloaded.", "Nothing to do return if not FLAGS.RUN == 'KittiSeg_pretrained': return import zipfile download_name", "f) hypes = json.load(f) utils.load_plugins() if 'TV_DIR_RUNS' in os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')", "def maybe_download_and_extract(runs_dir): logdir = os.path.join(runs_dir, FLAGS.RUN) if os.path.exists(logdir): # weights are downloaded. 
Nothing", "import shutil import sys import collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np import", "logging.info(\"f: %s\", f) hypes = json.load(f) utils.load_plugins() if 'TV_DIR_RUNS' in os.environ: runs_dir =", "None, None, 3]) inf_out = core.build_inference_graph(hypes, modules, image=image) # Create a session for", "parameters.') flags.DEFINE_string('name', None, 'Append a name Tag to run.') flags.DEFINE_string('project', None, 'Append a", "import tensorvision.core as core from evaluation import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained', 'Modifier for model", "('Whether to save the run. In case --nosave (default) ' 'output will be", "utf-8 -*- \"\"\"Trims weights on a pruned model.\"\"\" from __future__ import absolute_import from", "folder TV_DIR_RUNS/debug, ' 'hence it will get overwritten by further runs.')) else: tf.app.flags.DEFINE_boolean(", "'output will be saved to the folder TV_DIR_RUNS/debug, ' 'hence it will get", "with tf.Session().as_default(): tv_sess = core.start_tv_session(hypes) sess = tv_sess['sess'] saver = tv_sess['saver'] cur_step =", "collections # https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070 import numpy as np import tensorflow as tf flags =", "will get overwritten by further runs.')) segmentation_weights_url = (\"ftp://mi.eng.cam.ac.uk/\" \"pub/mttt2/models/KittiSeg_pretrained.zip\") def maybe_download_and_extract(runs_dir): logdir", "--recursive'\") exit(1) with open(tf.app.flags.FLAGS.hypes, 'r') as f: logging.info(\"f: %s\", f) hypes = json.load(f)", "import print_function import json import logging import os import shutil import sys import", "logging.warning(\"Starting Training with step 0.\") cur_step = 0 with tf.name_scope('Validation'): tf.get_variable_scope().reuse_variables() image_pl =", "division from __future__ import print_function import json import logging import os import shutil", "to the folder 
TV_DIR_RUNS/debug, ' 'hence it will get overwritten by further runs.'))", "prepare the tv session with tf.Session().as_default(): tv_sess = core.start_tv_session(hypes) sess = tv_sess['sess'] saver", "tf.Session().as_default(): tv_sess = core.start_tv_session(hypes) sess = tv_sess['sess'] saver = tv_sess['saver'] cur_step = core.load_weights(logdir,", "json.load(f) utils.load_plugins() if 'TV_DIR_RUNS' in os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else: runs_dir =", "hypes = json.load(f) utils.load_plugins() if 'TV_DIR_RUNS' in os.environ: runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg') else:", "tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir = os.path.join(runs_dir, FLAGS.RUN) modules = utils.load_modules_from_hypes(hypes)", "modules, image=image) # Create a session for running Ops on the Graph. trim_dir", "None.\") logging.warning(\"This could mean,\" \" that no weights have been loaded.\") logging.warning(\"Starting Training", "\" that no weights have been loaded.\") logging.warning(\"Starting Training with step 0.\") cur_step", "else: runs_dir = 'RUNS' utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes) utils._add_paths_to_sys(hypes) train.maybe_download_and_extract(hypes) maybe_download_and_extract(runs_dir) logging.info(\"Trimming weights.\") logdir =", "tensorvision.utils as utils import tensorvision.core as core from evaluation import kitti_test flags.DEFINE_string('RUN', 'KittiSeg_pretrained',", "saved to the folder TV_DIR_RUNS/debug ' 'hence it will get overwritten by further", "core.load_weights(trim_dir, sess, saver) for weight in tf.contrib.model_pruning.get_masks(): if any([layer in weight.name for layer", "main(_): utils.set_gpus_to_use() try: import tensorvision.train import tensorflow_fcn.utils except ImportError: logging.error(\"Could not import the", "'File storing model 
parameters.') flags.DEFINE_string('name', None, 'Append a name Tag to run.') flags.DEFINE_string('project',", "'Modifier for model parameters.') flags.DEFINE_string('hypes', 'hypes/KittiSeg.json', 'File storing model parameters.') flags.DEFINE_string('name', None, 'Append" ]
[ "# from .entities import Listing, Seller # from .repositories import ListingRepository # from", "# ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # ) # class CatalogService: # def publish_listing(self, listing:", "Seller # from .repositories import ListingRepository # from .rules import ( # ListingMustBeInDraftState,", "from .rules import ( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # ) # class CatalogService:", "import ( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # ) # class CatalogService: # def", "from seedwork.domain.services import DomainService # from seedwork.domain.value_objects import UUID # from .entities import", "from .entities import Listing, Seller # from .repositories import ListingRepository # from .rules", "Listing, Seller # from .repositories import ListingRepository # from .rules import ( #", ") # class CatalogService: # def publish_listing(self, listing: Listing, seller: Seller): # self.check_rule(ListingMustBeInDraftState(listing.status))", "# from .rules import ( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # ) # class", "import Listing, Seller # from .repositories import ListingRepository # from .rules import (", "# SellerMustBeEligibleForAddingNextListing, # ) # class CatalogService: # def publish_listing(self, listing: Listing, seller:", "import UUID # from .entities import Listing, Seller # from .repositories import ListingRepository", "# from .repositories import ListingRepository # from .rules import ( # ListingMustBeInDraftState, #", "UUID # from .entities import Listing, Seller # from .repositories import ListingRepository #", ".entities import Listing, Seller # from .repositories import ListingRepository # from .rules import", "from .repositories import ListingRepository # from .rules import ( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing,", "# ) # class CatalogService: # def publish_listing(self, 
listing: Listing, seller: Seller): #", "CatalogService: # def publish_listing(self, listing: Listing, seller: Seller): # self.check_rule(ListingMustBeInDraftState(listing.status)) # self.check_rule(SellerMustBeEligibleForAddingNextListing(seller)) #", "seedwork.domain.services import DomainService # from seedwork.domain.value_objects import UUID # from .entities import Listing,", "seedwork.domain.value_objects import UUID # from .entities import Listing, Seller # from .repositories import", "# from seedwork.domain.value_objects import UUID # from .entities import Listing, Seller # from", "# from seedwork.domain.services import DomainService # from seedwork.domain.value_objects import UUID # from .entities", "import DomainService # from seedwork.domain.value_objects import UUID # from .entities import Listing, Seller", "import ListingRepository # from .rules import ( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # )", ".repositories import ListingRepository # from .rules import ( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, #", "DomainService # from seedwork.domain.value_objects import UUID # from .entities import Listing, Seller #", "ListingRepository # from .rules import ( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # ) #", "SellerMustBeEligibleForAddingNextListing, # ) # class CatalogService: # def publish_listing(self, listing: Listing, seller: Seller):", "( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # ) # class CatalogService: # def publish_listing(self,", "# class CatalogService: # def publish_listing(self, listing: Listing, seller: Seller): # self.check_rule(ListingMustBeInDraftState(listing.status)) #", "from seedwork.domain.value_objects import UUID # from .entities import Listing, Seller # from .repositories", ".rules import ( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # ) # class CatalogService: #", "class 
CatalogService: # def publish_listing(self, listing: Listing, seller: Seller): # self.check_rule(ListingMustBeInDraftState(listing.status)) # self.check_rule(SellerMustBeEligibleForAddingNextListing(seller))", "ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # ) # class CatalogService: # def publish_listing(self, listing: Listing,", "# def publish_listing(self, listing: Listing, seller: Seller): # self.check_rule(ListingMustBeInDraftState(listing.status)) # self.check_rule(SellerMustBeEligibleForAddingNextListing(seller)) # listing.publish()" ]
[ "цену output['price_base'] = self._price_base(item) # записвыем ссылку на план объекта output['plan'] = self._get_image(item)", "и номера квартиры Возвращает словарь с ключами ['section', 'floor', 'number', 'building'] :param data:", "берем следующий ЖК if answer.status_code == 404: continue raw_data = answer.content content =", "'building': building, 'area': area, 'price_sale': price_sale, 'price_base': price_base, 'type': 'parking', 'plan': plan_img, 'section':", "На вход принимает bs4.element.Tag. Производит поиск по div классу tile__image. С помощью регулярного", "# обновляем в словаре ключи с данными для комнат в квартире, площади +", "= item.select_one('a', {'class': 'tile__name'}).get('href') output.append((name + f'({location})', self.base_url + urn)) return output def", "class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True ) location = data.find('span', class_='tile__location').get_text(strip=True) complex += f'({location})' return", "value elif title == 'этаж': result['floor'] = value elif title == 'номер': result['number']", "parking_div_info.find_all('div', class_='card__info-params__number') # парсинг площади try: raw_area = parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',', '.'))", "bs4.BeautifulSoup. Производит поиск пагинатора. Если он есть то возвращает номер последней страницы. :param", "по каждому парковочному месту for item in raw_data: # Бремем копию исходного словаря", "item in raw_data: # Бремем копию исходного словаря с ключами в который будем", "pages: for i in range(1, pages+1): page_url = self.base_url_flats + f'?page={i}' raw_data =", "страницу. 
Добавляем к URL /parking location, url = item url += '/parking' answer", "def _get_complex_item(self, data): \"\"\" Метод для поиска информации о квартире Поиск корпуса, секции,", "data.find_all('a', class_='flats-table__row table-body--row') # в цикле проходим по каждому парковочному месту for item", "[] raw_data = self.session.get(self.base_url_flats).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на странице", "int(price_base.split('руб.')[0].replace(' ', '')) price_sale = parking_data.find( 'span', class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace('", "complex = data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True ) location = data.find('span', class_='tile__location').get_text(strip=True)", "'апартамент' in name.lower(): result['type'] = 'apartment' else: result['type'] = 'flat' return result def", "# добавляем ?page=n к URL page_url = self.new_buildings_url + f'?page={i}' raw_data = self.session.get(page_url).content", "output.update(self._get_parking_info(item)) # добавляем словарь в список который будем возвращать result.append(output) return result def", "= value elif title == 'секция': result['section'] = value elif title == 'этаж':", "класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :param location: str :return: list of dicts \"\"\"", "на странице pages = self._find_pagination(content) if pages: for i in range(1, pages +", "объект ЖК :param data: bs4.BeautifulSoup :return: list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская", "данных о продаже парковочных мест Возвращает список словарей с данными о парковочных местах", "добавляем словарь в список который будем возвращать result.append(output) return result def _get_parking_info(self, data):", "URL /parking location, url = item url += '/parking' answer = self.session.get(url) #", "записи данных о отдельном парковочном месте На вход принимает объект класса bs4.BeautifulSoup :param", "= parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError): pass output_dict = { 'number': number, 'building': building,", "def _get_complex(self, data): \"\"\" Метод для поиска имени ЖК и его региона :param", "= None price_sale = None building = None area = None section =", "class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) except AttributeError: try: price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True)", "'type', 'phase', 'building', 'section', 'price_base', 'price_finished', 'price_sale', 'price_finished_sale', 'area', 'number', 'number_on_site', 'rooms', 'floor',", "\"\"\" output = [] raw_data = data.find_all('div', {'class': 'tile__content'}) for item in raw_data:", "{'id': 'plans_layout'}) plan_img = plan_div.find('img').get('src') except AttributeError: pass # поиск цены (в том", "[] # Итерируемся по списку ЖК for item in self.objects_list: # забираем имя", "output = [] raw_data = data.find_all('div', {'class': 'tile__content'}) for item in raw_data: name", "else: objects = self._get_objects(content) return objects def _get_objects(self, data): \"\"\" Функция принимает на", "будем записывать данные output = self.parser_dict.copy() # записываем имя ЖК и регион output['complex']", "def get_full_data(self, json_file=None): \"\"\" Метод парсит данные о квартирах в новостройках + данные", "\"\"\" # исходный список объектов который будем возвращать objects = [] raw_data =", "None number = 
None urn = data.get('href') parking_url = self.base_url + urn parking_data", "данными о парковочных местах :return: list of dicts \"\"\" objects = [] #", "int(pages[-2].text) return last_page return False def _get_image(self, data): \"\"\" Метод для парсинга схемы", "'').replace(',', '.')) if 'студия' in name.split()[0].lower(): result['rooms'] = 'studio' else: result['rooms'] = int(name.split('-')[0])", "= parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number') # парсинг площади try: raw_area", "парковочном месте На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :param location:", "местах :return: list of dicts \"\"\" objects = [] # Итерируемся по списку", "+ определение типа апартаменты/квартира :param data: bs4.element.Tag :return: dict \"\"\" result = dict()", "для поиска очереди строительства :param data: bs4.element.Tag :return: str \"\"\" try: phase =", "plan_img, 'section': section, 'floor': floor } return output_dict if __name__ == '__main__': ndv", "self.base_url_flats + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в", "dicts - if json_file=None :return: json_file - if json_file=True \"\"\" print('Starting data parsing...')", "output['phase'] = self._get_phase(item) # записываем цену output['price_base'] = self._price_base(item) # записвыем ссылку на", "месте (площаь, корпус, секция, этаж, план) output.update(self._get_parking_info(item)) # добавляем словарь в список который", "raw_data = answer.content content = soup(raw_data, 'html.parser') # Поиск кнопки <<Показать n предложений>>.", "# Поиск отдельных объектов объявлений на странице raw_data = data.find_all('div', class_='tile__link js-tile-link') #", "_get_image(self, data): \"\"\" Метод для парсинга схемы квартиры На вход принимает bs4.element.Tag. 
Производит", "data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True ) location = data.find('span', class_='tile__location').get_text(strip=True) complex +=", "парковочных мест на сранице ЖК raw_data = data.find_all('a', class_='flats-table__row table-body--row') # в цикле", "try: complex = data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True ) location = data.find('span',", "return objects def _get_objects(self, data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup.", "+ parking if json_file is None: return data_result else: with open('ndv_ru.json', 'w') as", "# записываем очередь строительства output['phase'] = self._get_phase(item) # записываем цену output['price_base'] = self._price_base(item)", "= self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\" Метод для получения данных о продаже квартир в", "if json_file=True \"\"\" print('Starting data parsing...') flats = self.get_flats_data() parking = self.get_parking_data() data_result", "квартире, площади + определение типа апартаменты/квартира :param data: bs4.element.Tag :return: dict \"\"\" result", "парковочных мест Возвращает список словарей с данными о парковочных местах :return: list of", "= parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError: pass # поиск ссылки на план try: plan_div", "будем возвращать objects = [] raw_data = self.session.get(self.base_url_flats).content content = soup(raw_data, 'html.parser') #", "result['floor'] = value elif title == 'номер': result['number'] = value return result def", "= [] # Поиск отдельных объектов парковочных мест на сранице ЖК raw_data =", "Метод для получения данных о продаже парковочных мест Возвращает список словарей с данными", "= content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)', row).group()) # Если страница есть, но в", 
"+ типа квартиры output.update(self._get_dimentions(item)) # добавляем словарь в список который будем возвращать result.append(output)", "# записываем данные о парковочном месте (площаь, корпус, секция, этаж, план) output.update(self._get_parking_info(item)) #", "для записи данных о отдельном парковочном месте На вход принимает объект класса bs4.BeautifulSoup", "'area': area, 'price_sale': price_sale, 'price_base': price_base, 'type': 'parking', 'plan': plan_img, 'section': section, 'floor':", "try: raw_area = parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',', '.')) except (AttributeError, IndexError): pass #", "bs4.BeautifulSoup :return: list of dict \"\"\" result = [] # Поиск отдельных объектов", "полученные данные в json файл :return: list of dicts - if json_file=None :return:", "'number', 'building'] :param data: bs4.element.Tag :return: dict \"\"\" keys = ('section', 'floor', 'number',", "for i in range(1, pages+1): page_url = self.base_url_flats + f'?page={i}' raw_data = self.session.get(page_url).content", "result['type'] = 'flat' return result def _write_flats_data(self, data): \"\"\" Метод для записи данных", "data: bs4.element.Tag :return: str \"\"\" try: complex = data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text(", "парковочных местах :return: list of dicts \"\"\" objects = [] # Итерируемся по", "словарь в список который будем возвращать result.append(output) return result def _get_parking_info(self, data): \"\"\"", "список кортежей с именем ЖК и его URL :param url: str :return: list", "исходный список объектов который будем возвращать objects = [] raw_data = self.session.get(self.base_url_flats).content content", "объектов парковочных мест на сранице ЖК raw_data = data.find_all('a', class_='flats-table__row table-body--row') # в", "= item.select_one('a', {'class': 'tile__name'}).text.strip() location = item.find('span', {'class': 'tile__location'}).get_text().strip() urn = 
item.select_one('a', {'class':", "'floor', 'number', 'building') result = dict.fromkeys(keys) info = data.find_all('div', class_='tile__in-complex-item') for item in", "# поиск номера парковочного места raw_number = parking_data.find('meta', {'content': '10'}) if raw_number: number", "места raw_number = parking_data.find('meta', {'content': '10'}) if raw_number: number = raw_number.previous.strip().split()[1].replace('№', '') else:", "get_parking_data(self): \"\"\" Метод для получения данных о продаже парковочных мест Возвращает список словарей", "class NdvParser: def __init__(self): self.session = requests.Session() self.base_url = 'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats'", "в список который будем возвращать result.append(output) return result def _write_parking_data(self, data, location): \"\"\"", "return objects def get_parking_data(self): \"\"\" Метод для получения данных о продаже парковочных мест", "для корпуса, секции, этажа и номера квартиры output.update(self._get_complex_item(item)) # обновляем в словаре ключи", "данных о отдельном парковочном месте На вход принимает объект класса bs4.BeautifulSoup :param data:", "is None: return data_result else: with open('ndv_ru.json', 'w') as file: json.dump(data_result, file) print('Success')", "json_file=None): \"\"\" Метод парсит данные о квартирах в новостройках + данные о парковочных", "plan == '/img/new-design/no-image.svg': return None return plan except AttributeError: return None def _get_complex(self,", "Если страница есть, но в данный момент 0 предложений, берем следующий ЖК if", "{'class': 'move-to-page'}) if pages: last_page = int(pages[-2].text) return last_page return False def _get_image(self,", "self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_flats_data(content)) else: objects", "поиск цены (в том числе со скидкой) try: price_base = parking_data.find('span', 
class_='card__info-prices__price').get_text(strip=True) price_base", "(AttributeError, IndexError): pass # парсинг корпуса try: building = parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError):", "def _get_image(self, data): \"\"\" Метод для парсинга схемы квартиры На вход принимает bs4.element.Tag.", "\"\"\" try: complex = data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True ) location =", "'apartment' else: result['type'] = 'flat' return result def _write_flats_data(self, data): \"\"\" Метод для", "# исходный список объектов который будем возвращать objects = [] raw_data = self.session.get(self.base_url_flats).content", "\"\"\" Метод производит поиск кол-ва комнат в квартире, площади + определение типа апартаменты/квартира", "= flats + parking if json_file is None: return data_result else: with open('ndv_ru.json',", "class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace(' ', '')) except AttributeError: pass # парсинг", "'tile__name'}).get('href') output.append((name + f'({location})', self.base_url + urn)) return output def _find_pagination(self, data): \"\"\"", "= data.findAll('a', {'class': 'move-to-page'}) if pages: last_page = int(pages[-2].text) return last_page return False", "о парковочных местах :return: list of dicts \"\"\" objects = [] # Итерируемся", "в исходный список objects.extend(self._get_objects(content)) else: objects = self._get_objects(content) return objects def _get_objects(self, data):", "urn = data.get('href') parking_url = self.base_url + urn parking_data = soup(self.session.get(parking_url).content, 'html.parser') #", ":param data: bs4.element.Tag :return: str (image src url) \"\"\" try: plan = data.find('div',", "pass # парсинг данных о парковочном месте(метраж, копус, секцияб этаж) parking_div_info = parking_data.find('div',", "Возвращает список словарей с данными о квартирах :return: list of 
dicts \"\"\" #", "parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number') # парсинг площади try: raw_area = parking_div_data[0].get_text(strip=True).split()[0] area =", "src url) \"\"\" try: plan = data.find('div', class_='tile__image')['data-deskstop'] plan = re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if", "ndv = NdvParser() # Запускаем парсер на квартиры и машиноместа. # Данные записываются", "и его региона :param data: bs4.element.Tag :return: str \"\"\" try: complex = data.find(", "try: price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) price_sale = parking_data.find(", "принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :return: list of dict \"\"\" result", "секция, этаж, план) output.update(self._get_parking_info(item)) # добавляем словарь в список который будем возвращать result.append(output)", "else: objects.extend(self._write_parking_data(content, location)) return objects def get_full_data(self, json_file=None): \"\"\" Метод парсит данные о", "номера квартиры output.update(self._get_complex_item(item)) # обновляем в словаре ключи с данными для комнат в", "data_result = flats + parking if json_file is None: return data_result else: with", "import json import re import requests from bs4 import BeautifulSoup as soup DICT_KEYS", "ЖК if answer.status_code == 404: continue raw_data = answer.content content = soup(raw_data, 'html.parser')", "\"\"\" pages = data.findAll('a', {'class': 'move-to-page'}) if pages: last_page = int(pages[-2].text) return last_page", "определение типа апартаменты/квартира :param data: bs4.element.Tag :return: dict \"\"\" result = dict() name", "Поиск кнопки <<Показать n предложений>>. 
Поиск кол-ва предложений о продаже row = content.find('a',", "self._get_objects(content) return objects def _get_objects(self, data): \"\"\" Функция принимает на вход объект класса", "производит поиск кол-ва комнат в квартире, площади + определение типа апартаменты/квартира :param data:", "= item.select_one('.tile__in-complex-value').get_text(strip=True) if title == 'корпус': result['building'] = value elif title == 'секция':", "raw_data = data.find_all('div', {'class': 'tile__content'}) for item in raw_data: name = item.select_one('a', {'class':", "id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)', row).group()) # Если страница есть, но в данный момент", "= self.parser_dict.copy() # записываем имя ЖК и регион output['complex'] = location # записываем", "= dict.fromkeys(keys) info = data.find_all('div', class_='tile__in-complex-item') for item in info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower()", "'html.parser') # поиск номера парковочного места raw_number = parking_data.find('meta', {'content': '10'}) if raw_number:", "\"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. Производит поиск пагинатора. 
Если он", "data.find_all('div', class_='tile__link js-tile-link') # в цикле проходим по каждому объявлению for item in", "= float(name.split()[-1].replace('м²', '').replace(',', '.')) if 'студия' in name.split()[0].lower(): result['rooms'] = 'studio' else: result['rooms']", "area = float(raw_area.replace(',', '.')) except (AttributeError, IndexError): pass # парсинг корпуса try: building", "pages+1): page_url = self.base_url_flats + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser')", "row).group()) # Если страница есть, но в данный момент 0 предложений, берем следующий", "dicts \"\"\" result = [] # Поиск отдельных объектов парковочных мест на сранице", "parking_data.find('div', {'id': 'plans_layout'}) plan_img = plan_div.find('img').get('src') except AttributeError: pass # поиск цены (в", "info = data.find_all('div', class_='tile__in-complex-item') for item in info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value =", "'https://www.ndv.ru/novostrojki' self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\" Метод для получения", "not number: continue # Поиск паджинатора на странице pages = self._find_pagination(content) if pages:", "['section', 'floor', 'number', 'building'] :param data: bs4.element.Tag :return: dict \"\"\" keys = ('section',", "квартир в новостройках Возвращает список словарей с данными о квартирах :return: list of", "int(price_sale.split('руб.')[0].replace(' ', '')) except AttributeError: pass # парсинг данных о парковочном месте(метраж, копус,", "записывать данные output = self.parser_dict.copy() # записываем имя ЖК и его регион output['complex']", "flats = self.get_flats_data() parking = self.get_parking_data() data_result = flats + parking if json_file", "\"\"\" try: plan = data.find('div', class_='tile__image')['data-deskstop'] plan = 
re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if plan ==", "= float(raw_area.replace(',', '.')) except (AttributeError, IndexError): pass # парсинг корпуса try: building =", "data): \"\"\" Метод для поиска очереди строительства :param data: bs4.element.Tag :return: str \"\"\"", "к URL /parking location, url = item url += '/parking' answer = self.session.get(url)", "page_url = url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') #", "return False def _get_image(self, data): \"\"\" Метод для парсинга схемы квартиры На вход", "plan_div.find('img').get('src') except AttributeError: pass # поиск цены (в том числе со скидкой) try:", ":param location: str :return: list of dicts \"\"\" result = [] # Поиск", "except AttributeError: pass # парсинг данных о парковочном месте(метраж, копус, секцияб этаж) parking_div_info", "item in self.objects_list: # забираем имя ЖК и ссылку на его страницу. Добавляем", "для комнат в квартире, площади + типа квартиры output.update(self._get_dimentions(item)) # добавляем словарь в", "parking_div_data[2].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг этажа try: floor = parking_div_data[3].get_text(strip=True) except", "self.parser_dict.copy() # записываем имя ЖК и его регион output['complex'] = self._get_complex(item) # записываем", "self._find_pagination(content) if pages: for i in range(1, pages+1): page_url = self.base_url_flats + f'?page={i}'", "answer.status_code == 404: continue raw_data = answer.content content = soup(raw_data, 'html.parser') # Поиск", "# обновляем в словаре ключи с данными для корпуса, секции, этажа и номера", "данных о продаже квартир в новостройках Возвращает список словарей с данными о квартирах", "'number', 'building') result = dict.fromkeys(keys) info = data.find_all('div', class_='tile__in-complex-item') for item in info:", "class_='tile__image')['data-deskstop'] plan = re.search(\"url\\('(?P<url>\\S+)'\\)\", 
plan).group('url') if plan == '/img/new-design/no-image.svg': return None return plan", "и ссылку на его страницу. Добавляем к URL /parking location, url = item", "self.session.get(url) # проверка есть ли в продаже парковочне места. Если нет, берем следующий", "Возвращает словарь с ключами ['section', 'floor', 'number', 'building'] :param data: bs4.element.Tag :return: dict", "result['rooms'] = int(name.split('-')[0]) if 'апартамент' in name.lower(): result['type'] = 'apartment' else: result['type'] =", "странице raw_data = data.find_all('div', class_='tile__link js-tile-link') # в цикле проходим по каждому объявлению", "self.get_flats_data() parking = self.get_parking_data() data_result = flats + parking if json_file is None:", "ЖК for item in self.objects_list: # забираем имя ЖК и ссылку на его", "'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki' self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\"", "Метод парсит данные о квартирах в новостройках + данные о парковочных местах Записывает", "location = item.find('span', {'class': 'tile__location'}).get_text().strip() urn = item.select_one('a', {'class': 'tile__name'}).get('href') output.append((name + f'({location})',", "else: with open('ndv_ru.json', 'w') as file: json.dump(data_result, file) print('Success') def _get_new_buildings(self, url): \"\"\"", ":return: list of dicts - if json_file=None :return: json_file - if json_file=True \"\"\"", "принимает на вход объект класса bs4.BeautifulSoup. Производит поиск пагинатора. 
Если он есть то", "в список который будем возвращать result.append(output) return result def _get_parking_info(self, data): \"\"\" Метод", "'furniture_price', 'plan', 'feature', 'view', 'euro_planning', 'sale', 'discount_percent', 'discount', 'comment'] class NdvParser: def __init__(self):", "# записываем имя ЖК и его регион output['complex'] = self._get_complex(item) # записываем очередь", "dicts \"\"\" objects = [] # Итерируемся по списку ЖК for item in", "список словарей с данными о квартирах :return: list of dicts \"\"\" # исходный", "['complex', 'type', 'phase', 'building', 'section', 'price_base', 'price_finished', 'price_sale', 'price_finished_sale', 'area', 'number', 'number_on_site', 'rooms',", "name.lower(): result['type'] = 'apartment' else: result['type'] = 'flat' return result def _write_flats_data(self, data):", "\"\"\" Метод возвращает список кортежей с именем ЖК и его URL :param url:", "корпуса, секции, этажа и номера квартиры Возвращает словарь с ключами ['section', 'floor', 'number',", "pages+1): page_url = url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser')", "Запускаем парсер на квартиры и машиноместа. # Данные записываются в json файл ndv.get_full_data(json_file=True)", "= parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',', '.')) except (AttributeError, IndexError): pass # парсинг корпуса", "региона :param data: bs4.element.Tag :return: str \"\"\" try: complex = data.find( 'a', class_='tile__resale-complex--link", "данные о квартирах в новостройках + данные о парковочных местах Записывает полученные данные", "о парковочных местах Записывает полученные данные в json файл :return: list of dicts", ":param data: bs4.BeautifulSoup :param location: str :return: list of dicts \"\"\" result =", "проверка есть ли в продаже парковочне места. 
Если нет, берем следующий ЖК if", "его региона :param data: bs4.element.Tag :return: str \"\"\" try: complex = data.find( 'a',", "pass # парсинг этажа try: floor = parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError): pass output_dict", "= location # записываем данные о парковочном месте (площаь, корпус, секция, этаж, план)", "Производит поиск пагинатора. Если он есть то возвращает номер последней страницы. :param data:", "self._write_flats_data(content) return objects def get_parking_data(self): \"\"\" Метод для получения данных о продаже парковочных", "number = int(re.search('(?P<number>\\d+)', row).group()) # Если страница есть, но в данный момент 0", "Поиск кол-ва предложений о продаже row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)', row).group())", "url = item url += '/parking' answer = self.session.get(url) # проверка есть ли", "name = item.select_one('a', {'class': 'tile__name'}).text.strip() location = item.find('span', {'class': 'tile__location'}).get_text().strip() urn = item.select_one('a',", "= url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем)", "as soup DICT_KEYS = ['complex', 'type', 'phase', 'building', 'section', 'price_base', 'price_finished', 'price_sale', 'price_finished_sale',", "класса bs4.BeautifulSoup. 
Ищет название жк, регион и ссылку на объект ЖК :param data:", "f'({location})' return complex except AttributeError: return None def _get_phase(self, data): \"\"\" Метод для", "dict \"\"\" result = [] # Поиск отдельных объектов объявлений на странице raw_data", "if pages: for i in range(1, pages+1): page_url = self.base_url_flats + f'?page={i}' raw_data", "ключи с данными для комнат в квартире, площади + типа квартиры output.update(self._get_dimentions(item)) #", "def _write_parking_data(self, data, location): \"\"\" Метод для записи данных о отдельном парковочном месте", "= self._find_pagination(content) if pages: for i in range(1, pages+1): page_url = url +", "предложений о продаже row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)', row).group()) # Если", "int(price_base.split('руб.')[0].replace(' ', '')) except AttributeError: try: price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace('", "return None def _get_complex(self, data): \"\"\" Метод для поиска имени ЖК и его", "except AttributeError: return None def _get_complex_item(self, data): \"\"\" Метод для поиска информации о", "номера парковочного места raw_number = parking_data.find('meta', {'content': '10'}) if raw_number: number = raw_number.previous.strip().split()[1].replace('№',", "from bs4 import BeautifulSoup as soup DICT_KEYS = ['complex', 'type', 'phase', 'building', 'section',", "location = data.find('span', class_='tile__location').get_text(strip=True) complex += f'({location})' return complex except AttributeError: return None", "= item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True) if title == 'корпус': result['building'] = value elif", "js-tile-link') # в цикле проходим по каждому объявлению for item in raw_data: #", "json_file - if 
json_file=True \"\"\" print('Starting data parsing...') flats = self.get_flats_data() parking =", "try: price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) except AttributeError: try:", "== '__main__': ndv = NdvParser() # Запускаем парсер на квартиры и машиноместа. #", "None return plan except AttributeError: return None def _get_complex(self, data): \"\"\" Метод для", "'price_base', 'price_finished', 'price_sale', 'price_finished_sale', 'area', 'number', 'number_on_site', 'rooms', 'floor', 'in_sale', 'sale_status', 'finished', 'currency',", "class_='tile__location').get_text(strip=True) complex += f'({location})' return complex except AttributeError: return None def _get_phase(self, data):", "'.')) if 'студия' in name.split()[0].lower(): result['rooms'] = 'studio' else: result['rooms'] = int(name.split('-')[0]) if", "отдельных объектов объявлений на странице raw_data = data.find_all('div', class_='tile__link js-tile-link') # в цикле", "try: section = parking_div_data[2].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг этажа try: floor", "'price_sale', 'price_finished_sale', 'area', 'number', 'number_on_site', 'rooms', 'floor', 'in_sale', 'sale_status', 'finished', 'currency', 'ceil', 'article',", "None urn = data.get('href') parking_url = self.base_url + urn parking_data = soup(self.session.get(parking_url).content, 'html.parser')", "строительства output['phase'] = self._get_phase(item) # записываем цену output['price_base'] = self._price_base(item) # записвыем ссылку", "«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output = [] raw_data = data.find_all('div', {'class':", "каждому объявлению for item in raw_data: # Бремем копию исходного словаря с ключами", "class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3])) return price_base except AttributeError: 
return None def _get_complex_item(self, data):", "# добавляем(объединяем) в исходный список objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content, location)) return objects def", "json_file=None :return: json_file - if json_file=True \"\"\" print('Starting data parsing...') flats = self.get_flats_data()", "bs4.BeautifulSoup :param data: bs4.BeautifulSoup :return: list of dict \"\"\" result = [] #", "price_sale = int(price_sale.split('руб.')[0].replace(' ', '')) except AttributeError: pass # парсинг данных о парковочном", "для парсинга данных о парковочном месте :param data: bs4.element.Tag :return: dict \"\"\" plan_img", "парсинга схемы квартиры На вход принимает bs4.element.Tag. Производит поиск по div классу tile__image.", "return None def _get_complex_item(self, data): \"\"\" Метод для поиска информации о квартире Поиск", "except (AttributeError, IndexError): pass # парсинг этажа try: floor = parking_div_data[3].get_text(strip=True) except (AttributeError,", "имя ЖК и ссылку на его страницу. 
Добавляем к URL /parking location, url", "этажа и номера квартиры Возвращает словарь с ключами ['section', 'floor', 'number', 'building'] :param", "price_base except AttributeError: return None def _get_complex_item(self, data): \"\"\" Метод для поиска информации", "'.')) except (AttributeError, IndexError): pass # парсинг корпуса try: building = parking_div_data[1].get_text(strip=True) except", "if raw_number: number = raw_number.previous.strip().split()[1].replace('№', '') else: try: number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except", "number: continue # Поиск паджинатора на странице pages = self._find_pagination(content) if pages: for", "import BeautifulSoup as soup DICT_KEYS = ['complex', 'type', 'phase', 'building', 'section', 'price_base', 'price_finished',", "записываем данные о парковочном месте (площаь, корпус, секция, этаж, план) output.update(self._get_parking_info(item)) # добавляем", "Метод для поиска имени ЖК и его региона :param data: bs4.element.Tag :return: str", ":param data: bs4.BeautifulSoup :return: list of tuples [('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')]", "этаж) parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number') # парсинг площади", "/parking location, url = item url += '/parking' answer = self.session.get(url) # проверка", "исходный список objects.extend(self._get_objects(content)) else: objects = self._get_objects(content) return objects def _get_objects(self, data): \"\"\"", "есть ли в продаже парковочне места. 
Если нет, берем следующий ЖК if answer.status_code", "'finishing_name', 'furniture', 'furniture_price', 'plan', 'feature', 'view', 'euro_planning', 'sale', 'discount_percent', 'discount', 'comment'] class NdvParser:", "str \"\"\" try: complex = data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True ) location", "of tuples [('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output = [] raw_data", "elif title == 'секция': result['section'] = value elif title == 'этаж': result['floor'] =", "имя ЖК и его регион output['complex'] = self._get_complex(item) # записываем очередь строительства output['phase']", "= parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) price_sale = parking_data.find( 'span', class_='card__info-prices__price", "'finished', 'currency', 'ceil', 'article', 'finishing_name', 'furniture', 'furniture_price', 'plan', 'feature', 'view', 'euro_planning', 'sale', 'discount_percent',", "квартиры :param data: bs4.element.Tag :return: str \"\"\" try: price_base = data.find('span', class_='tile__price').get_text(strip=True) price_base", "'ceil', 'article', 'finishing_name', 'furniture', 'furniture_price', 'plan', 'feature', 'view', 'euro_planning', 'sale', 'discount_percent', 'discount', 'comment']", "range(1, pages+1): page_url = url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data,", "= self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_parking_data(content, location))", "list of dict \"\"\" result = [] # Поиск отдельных объектов объявлений на", "его страницу. 
Добавляем к URL /parking location, url = item url += '/parking'", "raw_data = self.session.get(url).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на странице pages", "keys = ('section', 'floor', 'number', 'building') result = dict.fromkeys(keys) info = data.find_all('div', class_='tile__in-complex-item')", "принимает bs4.element.Tag. Производит поиск по div классу tile__image. С помощью регулярного выражения забирает", "__name__ == '__main__': ndv = NdvParser() # Запускаем парсер на квартиры и машиноместа.", "pass output_dict = { 'number': number, 'building': building, 'area': area, 'price_sale': price_sale, 'price_base':", "result['section'] = value elif title == 'этаж': result['floor'] = value elif title ==", "# в цикле проходим по каждому парковочному месту for item in raw_data: #", "объектов объявлений на странице raw_data = data.find_all('div', class_='tile__link js-tile-link') # в цикле проходим", "# Если страница есть, но в данный момент 0 предложений, берем следующий ЖК", "answer.content content = soup(raw_data, 'html.parser') # Поиск кнопки <<Показать n предложений>>. 
Поиск кол-ва", "if pages: for i in range(1, pages + 1): # добавляем ?page=n к", "class_='tile__in-complex-item') for item in info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True) if title", "= int(re.search('(?P<number>\\d+)', row).group()) # Если страница есть, но в данный момент 0 предложений,", "ЖК и его регион output['complex'] = self._get_complex(item) # записываем очередь строительства output['phase'] =", "number or False \"\"\" pages = data.findAll('a', {'class': 'move-to-page'}) if pages: last_page =", "о квартире Поиск корпуса, секции, этажа и номера квартиры Возвращает словарь с ключами", "= int(price_base.split('руб.')[0].replace(' ', '')) except AttributeError: try: price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base =", "список который будем возвращать result.append(output) return result def _get_parking_info(self, data): \"\"\" Метод для", "= soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._get_objects(content)) else: objects = self._get_objects(content)", "card__info-row--settings') parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number') # парсинг площади try: raw_area = parking_div_data[0].get_text(strip=True).split()[0] area", ":param url: str :return: list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')]", "\"\"\" print('Starting data parsing...') flats = self.get_flats_data() parking = self.get_parking_data() data_result = flats", ").get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace(' ', '')) except AttributeError: pass # парсинг данных о", "def get_parking_data(self): \"\"\" Метод для получения данных о продаже парковочных мест Возвращает список", "+ urn)) return output def _find_pagination(self, data): \"\"\" Функция принимает на вход объект", "result['type'] = 'apartment' else: result['type'] = 'flat' return result def _write_flats_data(self, data): \"\"\"", "= self._get_image(item) # обновляем в словаре ключи с данными для корпуса, секции, этажа", "bs4.element.Tag :return: str \"\"\" try: phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True) return phase except AttributeError:", "очереди строительства :param data: bs4.element.Tag :return: str \"\"\" try: phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True)", ":param data: bs4.BeautifulSoup :return: list of dict \"\"\" result = [] # Поиск", "location)) return objects def get_full_data(self, json_file=None): \"\"\" Метод парсит данные о квартирах в", "= self._find_pagination(content) if pages: for i in range(1, pages + 1): # добавляем", "страницы. :param data: bs4.BeautifulSoup :return: int last page number or False \"\"\" pages", "площади try: raw_area = parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',', '.')) except (AttributeError, IndexError): pass", "', '')) except AttributeError: pass # парсинг данных о парковочном месте(метраж, копус, секцияб", "= answer.content content = soup(raw_data, 'html.parser') # Поиск кнопки <<Показать n предложений>>. 
Поиск", "urn parking_data = soup(self.session.get(parking_url).content, 'html.parser') # поиск номера парковочного места raw_number = parking_data.find('meta',", "этажа try: floor = parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError): pass output_dict = { 'number':", "'tile__content'}) for item in raw_data: name = item.select_one('a', {'class': 'tile__name'}).text.strip() location = item.find('span',", "мест на сранице ЖК raw_data = data.find_all('a', class_='flats-table__row table-body--row') # в цикле проходим", "поиска очереди строительства :param data: bs4.element.Tag :return: str \"\"\" try: phase = data.find('span',", "plan = re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if plan == '/img/new-design/no-image.svg': return None return plan except", "регулярного выражения забирает URL :param data: bs4.element.Tag :return: str (image src url) \"\"\"", "URL page_url = self.new_buildings_url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser')", "price_base = None price_sale = None building = None area = None section", "\"\"\" try: phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True) return phase except AttributeError: return None def", "result['number'] = value return result def _get_dimentions(self, data): \"\"\" Метод производит поиск кол-ва", "'flat' return result def _write_flats_data(self, data): \"\"\" Метод для записи данных о отдельной", "будем возвращать result.append(output) return result def _get_parking_info(self, data): \"\"\" Метод для парсинга данных", "# добавляем словарь в список который будем возвращать result.append(output) return result def _get_parking_info(self,", "pass # парсинг секции try: section = parking_div_data[2].get_text(strip=True) except (AttributeError, IndexError): pass #", "= data.find_all('div', class_='tile__link js-tile-link') # в цикле проходим по каждому объявлению for item", "списку ЖК for item in 
self.objects_list: # забираем имя ЖК и ссылку на", "last_page return False def _get_image(self, data): \"\"\" Метод для парсинга схемы квартиры На", "о отдельной квартире в словарь На вход принимает объект класса bs4.BeautifulSoup :param data:", "self._find_pagination(content) if pages: for i in range(1, pages+1): page_url = url + f'?page={i}'", "float(name.split()[-1].replace('м²', '').replace(',', '.')) if 'студия' in name.split()[0].lower(): result['rooms'] = 'studio' else: result['rooms'] =", "price_base, 'type': 'parking', 'plan': plan_img, 'section': section, 'floor': floor } return output_dict if", "data): \"\"\" Метод для парсинга данных о парковочном месте :param data: bs4.element.Tag :return:", "<<Показать n предложений>>. Поиск кол-ва предложений о продаже row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number", "content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)', row).group()) # Если страница есть, но в данный", "'__main__': ndv = NdvParser() # Запускаем парсер на квартиры и машиноместа. # Данные", "value elif title == 'секция': result['section'] = value elif title == 'этаж': result['floor']", "он есть то возвращает номер последней страницы. :param data: bs4.BeautifulSoup :return: int last", "== 'номер': result['number'] = value return result def _get_dimentions(self, data): \"\"\" Метод производит", "# Поиск кнопки <<Показать n предложений>>. Поиск кол-ва предложений о продаже row =", "# записываем цену output['price_base'] = self._price_base(item) # записвыем ссылку на план объекта output['plan']", "улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output = [] raw_data = data.find_all('div', {'class': 'tile__content'}) for item", "Если он есть то возвращает номер последней страницы. :param data: bs4.BeautifulSoup :return: int", "ли в продаже парковочне места. 
Если нет, берем следующий ЖК if answer.status_code ==", "цены (в том числе со скидкой) try: price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base =", "== 'этаж': result['floor'] = value elif title == 'номер': result['number'] = value return", "try: building = parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг секции try: section", "возвращает номер последней страницы. :param data: bs4.BeautifulSoup :return: int last page number or", "raw_data: # Бремем копию исходного словаря с ключами в который будем записывать данные", "получения данных о продаже парковочных мест Возвращает список словарей с данными о парковочных", "исходного словаря с ключами в который будем записывать данные output = self.parser_dict.copy() #", ":return: int last page number or False \"\"\" pages = data.findAll('a', {'class': 'move-to-page'})", "'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._get_objects(content)) else: objects = self._get_objects(content) return objects", "класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :return: list of dict \"\"\" result = []", "else: objects = self._write_flats_data(content) return objects def get_parking_data(self): \"\"\" Метод для получения данных", "_get_objects(self, data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. Ищет название жк,", "ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output = [] raw_data = data.find_all('div', {'class': 'tile__content'})", "output def _find_pagination(self, data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. 
Производит", "- if json_file=None :return: json_file - if json_file=True \"\"\" print('Starting data parsing...') flats", "self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_parking_data(content, location)) else:", "о парковочном месте(метраж, копус, секцияб этаж) parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data =", "= self.session.get(url) # проверка есть ли в продаже парковочне места. Если нет, берем", "elif title == 'номер': result['number'] = value return result def _get_dimentions(self, data): \"\"\"", "о продаже row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)', row).group()) # Если страница", "# записываем имя ЖК и регион output['complex'] = location # записываем данные о", "данными для комнат в квартире, площади + типа квартиры output.update(self._get_dimentions(item)) # добавляем словарь", "_write_parking_data(self, data, location): \"\"\" Метод для записи данных о отдельном парковочном месте На", "площади + типа квартиры output.update(self._get_dimentions(item)) # добавляем словарь в список который будем возвращать", "= soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_flats_data(content)) else: objects = self._write_flats_data(content)", "price_base = int(price_base.split('руб.')[0].replace(' ', '')) except AttributeError: try: price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base", "'discount', 'comment'] class NdvParser: def __init__(self): self.session = requests.Session() self.base_url = 'https://www.ndv.ru' self.base_url_flats", "[] # Поиск отдельных объектов объявлений на странице raw_data = data.find_all('div', class_='tile__link js-tile-link')", "i in range(1, pages+1): page_url = url + f'?page={i}' raw_data = 
self.session.get(page_url).content content", "= parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) except AttributeError: try: price_base =", "= None urn = data.get('href') parking_url = self.base_url + urn parking_data = soup(self.session.get(parking_url).content,", "# добавляем(объединяем) в исходный список objects.extend(self._write_flats_data(content)) else: objects = self._write_flats_data(content) return objects def", "= 'https://www.ndv.ru/novostrojki' self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\" Метод для", "parsing...') flats = self.get_flats_data() parking = self.get_parking_data() data_result = flats + parking if", "список который будем возвращать result.append(output) return result def _write_parking_data(self, data, location): \"\"\" Метод", "raw_number = parking_data.find('meta', {'content': '10'}) if raw_number: number = raw_number.previous.strip().split()[1].replace('№', '') else: try:", "на странице raw_data = data.find_all('div', class_='tile__link js-tile-link') # в цикле проходим по каждому", "data: bs4.element.Tag :return: dict \"\"\" result = dict() name = data.find('a', {'class': 'tile__name'}).get_text(strip=True)", "def __init__(self): self.session = requests.Session() self.base_url = 'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url =", "# поиск ссылки на план try: plan_div = parking_data.find('div', {'id': 'plans_layout'}) plan_img =", "в квартире, площади + определение типа апартаменты/квартира :param data: bs4.element.Tag :return: dict \"\"\"", "Бремем копию исходного словаря с ключами в который будем записывать данные output =", "data.findAll('a', {'class': 'move-to-page'}) if pages: last_page = int(pages[-2].text) return last_page return False def", ":return: dict \"\"\" result = dict() 
name = data.find('a', {'class': 'tile__name'}).get_text(strip=True) result['area'] =", "вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :return: list of dict \"\"\"", "ключи с данными для корпуса, секции, этажа и номера квартиры output.update(self._get_complex_item(item)) # обновляем", "'floor', 'number', 'building'] :param data: bs4.element.Tag :return: dict \"\"\" keys = ('section', 'floor',", "# забираем имя ЖК и ссылку на его страницу. Добавляем к URL /parking", "except (AttributeError, IndexError): pass # парсинг корпуса try: building = parking_div_data[1].get_text(strip=True) except (AttributeError,", "else: try: number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError: pass # поиск ссылки на", "result = dict() name = data.find('a', {'class': 'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.'))", "кортежей с именем ЖК и его URL :param url: str :return: list of", ":return: dict \"\"\" keys = ('section', 'floor', 'number', 'building') result = dict.fromkeys(keys) info", "для парсинга схемы квартиры На вход принимает bs4.element.Tag. Производит поиск по div классу", "список objects.extend(self._get_objects(content)) else: objects = self._get_objects(content) return objects def _get_objects(self, data): \"\"\" Функция", "поиск пагинатора. Если он есть то возвращает номер последней страницы. :param data: bs4.BeautifulSoup", "location: str :return: list of dicts \"\"\" result = [] # Поиск отдельных", "о продаже квартир в новостройках Возвращает список словарей с данными о квартирах :return:", "с именем ЖК и его URL :param url: str :return: list of tuples", "data.find_all('div', {'class': 'tile__content'}) for item in raw_data: name = item.select_one('a', {'class': 'tile__name'}).text.strip() location", "для получения данных о продаже парковочных мест Возвращает список словарей с данными о", "bs4.element.Tag. 
Производит поиск по div классу tile__image. С помощью регулярного выражения забирает URL", "= None price_base = None price_sale = None building = None area =", "self.new_buildings_url = 'https://www.ndv.ru/novostrojki' self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\" Метод", "= soup(self.session.get(parking_url).content, 'html.parser') # поиск номера парковочного места raw_number = parking_data.find('meta', {'content': '10'})", "на план try: plan_div = parking_data.find('div', {'id': 'plans_layout'}) plan_img = plan_div.find('img').get('src') except AttributeError:", "self._get_image(item) # обновляем в словаре ключи с данными для корпуса, секции, этажа и", "для поиска цены квартиры :param data: bs4.element.Tag :return: str \"\"\" try: price_base =", "data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. Производит поиск пагинатора. Если", "== 404: continue raw_data = answer.content content = soup(raw_data, 'html.parser') # Поиск кнопки", "requests.Session() self.base_url = 'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki' self.parser_dict = dict.fromkeys(DICT_KEYS)", "отдельных объектов парковочных мест на сранице ЖК raw_data = data.find_all('a', class_='flats-table__row table-body--row') #", "in name.lower(): result['type'] = 'apartment' else: result['type'] = 'flat' return result def _write_flats_data(self,", "except AttributeError: pass # поиск цены (в том числе со скидкой) try: price_base", "ЖК if not number: continue # Поиск паджинатора на странице pages = self._find_pagination(content)", "объекта output['plan'] = self._get_image(item) # обновляем в словаре ключи с данными для корпуса,", "'price_base': price_base, 'type': 'parking', 'plan': plan_img, 'section': section, 'floor': floor } return output_dict", "urn = item.select_one('a', {'class': 
'tile__name'}).get('href') output.append((name + f'({location})', self.base_url + urn)) return output", "последней страницы. :param data: bs4.BeautifulSoup :return: int last page number or False \"\"\"", "of tuples [('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects = [] raw_data", "в квартире, площади + типа квартиры output.update(self._get_dimentions(item)) # добавляем словарь в список который", "места. Если нет, берем следующий ЖК if answer.status_code == 404: continue raw_data =", "try: plan = data.find('div', class_='tile__image')['data-deskstop'] plan = re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if plan == '/img/new-design/no-image.svg':", "'')) except AttributeError: pass # парсинг данных о парковочном месте(метраж, копус, секцияб этаж)", "вход объект класса bs4.BeautifulSoup. Производит поиск пагинатора. Если он есть то возвращает номер", "= parking_div_data[2].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг этажа try: floor = parking_div_data[3].get_text(strip=True)", "данными о квартирах :return: list of dicts \"\"\" # исходный список объектов который", "= data.find_all('a', class_='flats-table__row table-body--row') # в цикле проходим по каждому парковочному месту for", "квартире Поиск корпуса, секции, этажа и номера квартиры Возвращает словарь с ключами ['section',", "= self._write_flats_data(content) return objects def get_parking_data(self): \"\"\" Метод для получения данных о продаже", "self.base_url + urn)) return output def _find_pagination(self, data): \"\"\" Функция принимает на вход", "+= f'({location})' return complex except AttributeError: return None def _get_phase(self, data): \"\"\" Метод", "raw_area = parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',', '.')) except (AttributeError, IndexError): pass # парсинг", "def _get_parking_info(self, data): \"\"\" Метод для парсинга данных о парковочном месте :param 
data:", "есть то возвращает номер последней страницы. :param data: bs4.BeautifulSoup :return: int last page", "data.find('div', class_='tile__image')['data-deskstop'] plan = re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if plan == '/img/new-design/no-image.svg': return None return", "data): \"\"\" Метод для поиска информации о квартире Поиск корпуса, секции, этажа и", "копию исходного словаря с ключами в который будем записывать данные output = self.parser_dict.copy()", "на план объекта output['plan'] = self._get_image(item) # обновляем в словаре ключи с данными", "= data.find('span', class_='tile__row--resale_date').get_text(strip=True) return phase except AttributeError: return None def _price_base(self, data): \"\"\"", "'plans_layout'}) plan_img = plan_div.find('img').get('src') except AttributeError: pass # поиск цены (в том числе", "in raw_data: # Бремем копию исходного словаря с ключами в который будем записывать", "self.new_buildings_url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в", "Возвращает список словарей с данными о парковочных местах :return: list of dicts \"\"\"", "квартиры output.update(self._get_dimentions(item)) # добавляем словарь в список который будем возвращать result.append(output) return result", "plan).group('url') if plan == '/img/new-design/no-image.svg': return None return plan except AttributeError: return None", "записи данных о отдельной квартире в словарь На вход принимает объект класса bs4.BeautifulSoup", "except AttributeError: try: price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) price_sale", "по каждому объявлению for item in raw_data: # Бремем копию исходного словаря с", "# парсинг секции try: section = parking_div_data[2].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг", "data.find('a', {'class': 
'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.')) if 'студия' in name.split()[0].lower(): result['rooms']", "записвыем ссылку на план объекта output['plan'] = self._get_image(item) # обновляем в словаре ключи", "raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._get_objects(content))", "= None section = None floor = None number = None urn =", "objects def get_full_data(self, json_file=None): \"\"\" Метод парсит данные о квартирах в новостройках +", "None floor = None number = None urn = data.get('href') parking_url = self.base_url", "= parking_data.find('meta', {'content': '10'}) if raw_number: number = raw_number.previous.strip().split()[1].replace('№', '') else: try: number", "BeautifulSoup as soup DICT_KEYS = ['complex', 'type', 'phase', 'building', 'section', 'price_base', 'price_finished', 'price_sale',", "\"\"\" Метод для получения данных о продаже квартир в новостройках Возвращает список словарей", "[('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output = [] raw_data = data.find_all('div',", "# Поиск паджинатора на странице pages = self._find_pagination(content) if pages: for i in", "\"\"\" objects = [] raw_data = self.session.get(url).content content = soup(raw_data, 'html.parser') # Поиск", "возвращать result.append(output) return result def _write_parking_data(self, data, location): \"\"\" Метод для записи данных", "raw_data = data.find_all('a', class_='flats-table__row table-body--row') # в цикле проходим по каждому парковочному месту", "objects def get_parking_data(self): \"\"\" Метод для получения данных о продаже парковочных мест Возвращает", "= item.find('span', {'class': 'tile__location'}).get_text().strip() urn = item.select_one('a', {'class': 'tile__name'}).get('href') output.append((name + f'({location})', self.base_url", "проходим по каждому парковочному месту for item in raw_data: # Бремем копию исходного", "row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)', row).group()) # Если страница есть, но", "+ 1): # добавляем ?page=n к URL page_url = self.new_buildings_url + f'?page={i}' raw_data", "парсинг площади try: raw_area = parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',', '.')) except (AttributeError, IndexError):", "местах Записывает полученные данные в json файл :return: list of dicts - if", "objects = [] raw_data = self.session.get(url).content content = soup(raw_data, 'html.parser') # Поиск паджинатора", "ключами ['section', 'floor', 'number', 'building'] :param data: bs4.element.Tag :return: dict \"\"\" keys =", "pages = self._find_pagination(content) if pages: for i in range(1, pages + 1): #", "range(1, pages + 1): # добавляем ?page=n к URL page_url = self.new_buildings_url +", "будем возвращать result.append(output) return result def _write_parking_data(self, data, location): \"\"\" 
Метод для записи", "None: return data_result else: with open('ndv_ru.json', 'w') as file: json.dump(data_result, file) print('Success') def", "requests from bs4 import BeautifulSoup as soup DICT_KEYS = ['complex', 'type', 'phase', 'building',", "\"\"\" Метод для получения данных о продаже парковочных мест Возвращает список словарей с", "'rooms', 'floor', 'in_sale', 'sale_status', 'finished', 'currency', 'ceil', 'article', 'finishing_name', 'furniture', 'furniture_price', 'plan', 'feature',", "в словарь На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :return: list", "= re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if plan == '/img/new-design/no-image.svg': return None return plan except AttributeError:", "dict \"\"\" plan_img = None price_base = None price_sale = None building =", "'section', 'price_base', 'price_finished', 'price_sale', 'price_finished_sale', 'area', 'number', 'number_on_site', 'rooms', 'floor', 'in_sale', 'sale_status', 'finished',", "return plan except AttributeError: return None def _get_complex(self, data): \"\"\" Метод для поиска", "_get_complex_item(self, data): \"\"\" Метод для поиска информации о квартире Поиск корпуса, секции, этажа", "data: bs4.element.Tag :return: str \"\"\" try: price_base = data.find('span', class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3]))", "данных о отдельной квартире в словарь На вход принимает объект класса bs4.BeautifulSoup :param", "{'content': '10'}) if raw_number: number = raw_number.previous.strip().split()[1].replace('№', '') else: try: number = parking_data.find('h1',", "name.split()[0].lower(): result['rooms'] = 'studio' else: result['rooms'] = int(name.split('-')[0]) if 'апартамент' in name.lower(): result['type']", "title == 'номер': result['number'] = value return result def _get_dimentions(self, data): \"\"\" Метод", ":return: list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects =", "{'class': 'tile__name'}).get('href') output.append((name + f'({location})', self.base_url + urn)) return output def _find_pagination(self, data):", "parking = self.get_parking_data() data_result = flats + parking if json_file is None: return", "в цикле проходим по каждому парковочному месту for item in raw_data: # Бремем", "= self.get_parking_data() data_result = flats + parking if json_file is None: return data_result", "копус, секцияб этаж) parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number') #", "div классу tile__image. С помощью регулярного выражения забирает URL :param data: bs4.element.Tag :return:", "card__info-prices--red' ).get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace(' ', '')) except AttributeError: pass # парсинг данных", "soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_flats_data(content)) else: objects = self._write_flats_data(content) return", "int(''.join(price_base.split()[:3])) return price_base except AttributeError: return None def _get_complex_item(self, data): \"\"\" Метод для", "# парсинг этажа try: floor = parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError): pass output_dict =", "bs4.element.Tag :return: dict \"\"\" plan_img = None price_base = None price_sale = None", "поиск кол-ва комнат в квартире, площади + определение типа апартаменты/квартира :param data: bs4.element.Tag", "try: floor = parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError): pass output_dict = { 'number': number,", "список словарей с данными о парковочных местах :return: list of dicts \"\"\" objects", "поиск по div классу tile__image. 
С помощью регулярного выражения забирает URL :param data:", "Метод для поиска информации о квартире Поиск корпуса, секции, этажа и номера квартиры", ":return: list of dicts \"\"\" objects = [] # Итерируемся по списку ЖК", "квартире, площади + типа квартиры output.update(self._get_dimentions(item)) # добавляем словарь в список который будем", "в продаже парковочне места. Если нет, берем следующий ЖК if answer.status_code == 404:", "import re import requests from bs4 import BeautifulSoup as soup DICT_KEYS = ['complex',", "= value elif title == 'этаж': result['floor'] = value elif title == 'номер':", "на вход объект класса bs4.BeautifulSoup. Производит поиск пагинатора. Если он есть то возвращает", "название жк, регион и ссылку на объект ЖК :param data: bs4.BeautifulSoup :return: list", "IndexError): pass output_dict = { 'number': number, 'building': building, 'area': area, 'price_sale': price_sale,", "AttributeError: return None def _price_base(self, data): \"\"\" Метод для поиска цены квартиры :param", "price_sale, 'price_base': price_base, 'type': 'parking', 'plan': plan_img, 'section': section, 'floor': floor } return", "NdvParser() # Запускаем парсер на квартиры и машиноместа. 
# Данные записываются в json", "объявлению for item in raw_data: # Бремем копию исходного словаря с ключами в", "'move-to-page'}) if pages: last_page = int(pages[-2].text) return last_page return False def _get_image(self, data):", "parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг секции try: section = parking_div_data[2].get_text(strip=True) except", "ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects = [] raw_data = self.session.get(url).content content =", "на странице pages = self._find_pagination(content) if pages: for i in range(1, pages+1): page_url", "AttributeError: pass # поиск цены (в том числе со скидкой) try: price_base =", "'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_flats_data(content)) else: objects = self._write_flats_data(content) return objects", "и его регион output['complex'] = self._get_complex(item) # записываем очередь строительства output['phase'] = self._get_phase(item)", "Если нет, берем следующий ЖК if answer.status_code == 404: continue raw_data = answer.content", "json_file is None: return data_result else: with open('ndv_ru.json', 'w') as file: json.dump(data_result, file)", "данных о парковочном месте :param data: bs4.element.Tag :return: dict \"\"\" plan_img = None", "soup(raw_data, 'html.parser') # Поиск паджинатора на странице pages = self._find_pagination(content) if pages: for", "в словаре ключи с данными для комнат в квартире, площади + типа квартиры", "вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :param location: str :return: list", "'price_finished_sale', 'area', 'number', 'number_on_site', 'rooms', 'floor', 'in_sale', 'sale_status', 'finished', 'currency', 'ceil', 'article', 'finishing_name',", "for item in self.objects_list: # забираем имя ЖК и ссылку на его страницу.", "= int(price_base.split('руб.')[0].replace(' ', '')) price_sale = parking_data.find( 'span', 
class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale =", "= self.base_url_flats + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем)", "= None number = None urn = data.get('href') parking_url = self.base_url + urn", "= None floor = None number = None urn = data.get('href') parking_url =", "словарей с данными о квартирах :return: list of dicts \"\"\" # исходный список", "месте(метраж, копус, секцияб этаж) parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number')", "продаже row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)', row).group()) # Если страница есть,", "добавляем(объединяем) в исходный список objects.extend(self._write_flats_data(content)) else: objects = self._write_flats_data(content) return objects def get_parking_data(self):", "парсинг этажа try: floor = parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError): pass output_dict = {", "список объектов который будем возвращать objects = [] raw_data = self.session.get(self.base_url_flats).content content =", "data.find_all('div', class_='tile__in-complex-item') for item in info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True) if", "страница есть, но в данный момент 0 предложений, берем следующий ЖК if not", "секцияб этаж) parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number') # парсинг", "\"\"\" plan_img = None price_base = None price_sale = None building = None", "с данными для комнат в квартире, площади + типа квартиры 
output.update(self._get_dimentions(item)) # добавляем", "'floor', 'in_sale', 'sale_status', 'finished', 'currency', 'ceil', 'article', 'finishing_name', 'furniture', 'furniture_price', 'plan', 'feature', 'view',", "- if json_file=True \"\"\" print('Starting data parsing...') flats = self.get_flats_data() parking = self.get_parking_data()", "о парковочном месте (площаь, корпус, секция, этаж, план) output.update(self._get_parking_info(item)) # добавляем словарь в", "имени ЖК и его региона :param data: bs4.element.Tag :return: str \"\"\" try: complex", "Ищет название жк, регион и ссылку на объект ЖК :param data: bs4.BeautifulSoup :return:", "= requests.Session() self.base_url = 'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki' self.parser_dict =", "комнат в квартире, площади + типа квартиры output.update(self._get_dimentions(item)) # добавляем словарь в список", "} return output_dict if __name__ == '__main__': ndv = NdvParser() # Запускаем парсер", "8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output = [] raw_data = data.find_all('div', {'class': 'tile__content'}) for item in", "str (image src url) \"\"\" try: plan = data.find('div', class_='tile__image')['data-deskstop'] plan = re.search(\"url\\('(?P<url>\\S+)'\\)\",", "bs4.element.Tag :return: dict \"\"\" keys = ('section', 'floor', 'number', 'building') result = dict.fromkeys(keys)", "class_='card__info-row card__info-row--settings') parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number') # парсинг площади try: raw_area = parking_div_data[0].get_text(strip=True).split()[0]", "номера квартиры Возвращает словарь с ключами ['section', 'floor', 'number', 'building'] :param data: bs4.element.Tag", "8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects = [] raw_data = self.session.get(url).content content = soup(raw_data, 'html.parser') #", "of dicts \"\"\" # исходный список объектов который 
будем возвращать objects = []", "return output_dict if __name__ == '__main__': ndv = NdvParser() # Запускаем парсер на", "url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в", "= data.find('span', class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3])) return price_base except AttributeError: return None def", "f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список", "= 'flat' return result def _write_flats_data(self, data): \"\"\" Метод для записи данных о", "= self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._get_objects(content)) else:", "{'class': 'tile__location'}).get_text().strip() urn = item.select_one('a', {'class': 'tile__name'}).get('href') output.append((name + f'({location})', self.base_url + urn))", "{'class': 'tile__name'}).text.strip() location = item.find('span', {'class': 'tile__location'}).get_text().strip() urn = item.select_one('a', {'class': 'tile__name'}).get('href') output.append((name", "url) \"\"\" try: plan = data.find('div', class_='tile__image')['data-deskstop'] plan = re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if plan", "self._find_pagination(content) if pages: for i in range(1, pages + 1): # добавляем ?page=n", "Метод для поиска очереди строительства :param data: bs4.element.Tag :return: str \"\"\" try: phase", "item in info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True) if title == 'корпус':", "return output def _find_pagination(self, data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup.", "data.get('href') parking_url = self.base_url + urn parking_data = soup(self.session.get(parking_url).content, 
'html.parser') # поиск номера", "записываем имя ЖК и его регион output['complex'] = self._get_complex(item) # записываем очередь строительства", "# поиск цены (в том числе со скидкой) try: price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True)", "js_tile_complex_link' ).get_text( strip=True ) location = data.find('span', class_='tile__location').get_text(strip=True) complex += f'({location})' return complex", "self._get_complex(item) # записываем очередь строительства output['phase'] = self._get_phase(item) # записываем цену output['price_base'] =", "Производит поиск по div классу tile__image. С помощью регулярного выражения забирает URL :param", "_get_complex(self, data): \"\"\" Метод для поиска имени ЖК и его региона :param data:", "который будем записывать данные output = self.parser_dict.copy() # записываем имя ЖК и его", "парковочне места. Если нет, берем следующий ЖК if answer.status_code == 404: continue raw_data", "\"\"\" objects = [] # Итерируемся по списку ЖК for item in self.objects_list:", "'секция': result['section'] = value elif title == 'этаж': result['floor'] = value elif title", "self.session.get(url).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на странице pages = self._find_pagination(content)", "\"\"\" Метод для парсинга данных о парковочном месте :param data: bs4.element.Tag :return: dict", "output['price_base'] = self._price_base(item) # записвыем ссылку на план объекта output['plan'] = self._get_image(item) #", "'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True ) location = data.find('span', class_='tile__location').get_text(strip=True) complex += f'({location})'", "\"\"\" result = dict() name = data.find('a', {'class': 'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²', '').replace(',',", "комнат в квартире, площади + определение типа апартаменты/квартира :param data: bs4.element.Tag :return: dict", "if plan 
== '/img/new-design/no-image.svg': return None return plan except AttributeError: return None def", "output['plan'] = self._get_image(item) # обновляем в словаре ключи с данными для корпуса, секции,", "plan = data.find('div', class_='tile__image')['data-deskstop'] plan = re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if plan == '/img/new-design/no-image.svg': return", "забирает URL :param data: bs4.element.Tag :return: str (image src url) \"\"\" try: plan", "Поиск отдельных объектов парковочных мест на сранице ЖК raw_data = data.find_all('a', class_='flats-table__row table-body--row')", "in range(1, pages+1): page_url = url + f'?page={i}' raw_data = self.session.get(page_url).content content =", "None building = None area = None section = None floor = None", "dicts \"\"\" # исходный список объектов который будем возвращать objects = [] raw_data", "self.session.get(self.base_url_flats).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на странице pages = self._find_pagination(content)", "продаже квартир в новостройках Возвращает список словарей с данными о квартирах :return: list", "# Запускаем парсер на квартиры и машиноместа. # Данные записываются в json файл", "= None area = None section = None floor = None number =", "in self.objects_list: # забираем имя ЖК и ссылку на его страницу. 
Добавляем к", "'comment'] class NdvParser: def __init__(self): self.session = requests.Session() self.base_url = 'https://www.ndv.ru' self.base_url_flats =", "strip=True ) location = data.find('span', class_='tile__location').get_text(strip=True) complex += f'({location})' return complex except AttributeError:", "raw_data = data.find_all('div', class_='tile__link js-tile-link') # в цикле проходим по каждому объявлению for", "float(raw_area.replace(',', '.')) except (AttributeError, IndexError): pass # парсинг корпуса try: building = parking_div_data[1].get_text(strip=True)", "'type': 'parking', 'plan': plan_img, 'section': section, 'floor': floor } return output_dict if __name__", "ЖК и ссылку на его страницу. Добавляем к URL /parking location, url =", "'sale_status', 'finished', 'currency', 'ceil', 'article', 'finishing_name', 'furniture', 'furniture_price', 'plan', 'feature', 'view', 'euro_planning', 'sale',", "self.session = requests.Session() self.base_url = 'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki' self.parser_dict", "записывать данные output = self.parser_dict.copy() # записываем имя ЖК и регион output['complex'] =", "с ключами ['section', 'floor', 'number', 'building'] :param data: bs4.element.Tag :return: dict \"\"\" keys", "json.dump(data_result, file) print('Success') def _get_new_buildings(self, url): \"\"\" Метод возвращает список кортежей с именем", "('section', 'floor', 'number', 'building') result = dict.fromkeys(keys) info = data.find_all('div', class_='tile__in-complex-item') for item", "item.select_one('.tile__in-complex-value').get_text(strip=True) if title == 'корпус': result['building'] = value elif title == 'секция': result['section']", "записываем цену output['price_base'] = self._price_base(item) # записвыем ссылку на план объекта output['plan'] =", "Функция принимает на вход объект класса bs4.BeautifulSoup. Производит поиск пагинатора. 
Если он есть", "import requests from bs4 import BeautifulSoup as soup DICT_KEYS = ['complex', 'type', 'phase',", "на объект ЖК :param data: bs4.BeautifulSoup :return: list of tuples [('Мкр. «Мегаполис»(Москва, ВАО,", "который будем возвращать result.append(output) return result def _get_parking_info(self, data): \"\"\" Метод для парсинга", "= self._find_pagination(content) if pages: for i in range(1, pages+1): page_url = self.base_url_flats +", "except AttributeError: return None def _get_phase(self, data): \"\"\" Метод для поиска очереди строительства", "'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.')) if 'студия' in name.split()[0].lower(): result['rooms'] = 'studio'", "= int(price_sale.split('руб.')[0].replace(' ', '')) except AttributeError: pass # парсинг данных о парковочном месте(метраж,", "Поиск паджинатора на странице pages = self._find_pagination(content) if pages: for i in range(1,", "= dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\" Метод для получения данных о", ":return: str \"\"\" try: complex = data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True )", "записываем имя ЖК и регион output['complex'] = location # записываем данные о парковочном", "'html.parser') # Поиск паджинатора на странице pages = self._find_pagination(content) if pages: for i", "bs4.BeautifulSoup :return: int last page number or False \"\"\" pages = data.findAll('a', {'class':", "class_='flats-table__row table-body--row') # в цикле проходим по каждому парковочному месту for item in", "словарей с данными о парковочных местах :return: list of dicts \"\"\" objects =", "item.select_one('a', {'class': 'tile__name'}).text.strip() location = item.find('span', {'class': 'tile__location'}).get_text().strip() urn = item.select_one('a', {'class': 'tile__name'}).get('href')", "return complex except 
AttributeError: return None def _get_phase(self, data): \"\"\" Метод для поиска", "return last_page return False def _get_image(self, data): \"\"\" Метод для парсинга схемы квартиры", "проходим по каждому объявлению for item in raw_data: # Бремем копию исходного словаря", "except (AttributeError, IndexError): pass # парсинг секции try: section = parking_div_data[2].get_text(strip=True) except (AttributeError,", "числе со скидкой) try: price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', ''))", "\"\"\" Метод для поиска имени ЖК и его региона :param data: bs4.element.Tag :return:", "False def _get_image(self, data): \"\"\" Метод для парсинга схемы квартиры На вход принимает", "number = None urn = data.get('href') parking_url = self.base_url + urn parking_data =", "С помощью регулярного выражения забирает URL :param data: bs4.element.Tag :return: str (image src", "pages: for i in range(1, pages + 1): # добавляем ?page=n к URL", "'price_finished', 'price_sale', 'price_finished_sale', 'area', 'number', 'number_on_site', 'rooms', 'floor', 'in_sale', 'sale_status', 'finished', 'currency', 'ceil',", "принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :param location: str :return: list of", "о парковочном месте :param data: bs4.element.Tag :return: dict \"\"\" plan_img = None price_base", "result.append(output) return result def _get_parking_info(self, data): \"\"\" Метод для парсинга данных о парковочном", "AttributeError: try: price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) price_sale =", "list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects = []", "парсит данные о квартирах в новостройках + данные о парковочных местах Записывает полученные", "'10'}) if raw_number: number = raw_number.previous.strip().split()[1].replace('№', '') else: try: number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2]", "Записывает полученные данные в json файл :return: list of dicts - if json_file=None", "но в данный момент 0 предложений, берем следующий ЖК if not number: continue", "отдельной квартире в словарь На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup", "месте На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :param location: str", "секции, этажа и номера квартиры output.update(self._get_complex_item(item)) # обновляем в словаре ключи с данными", "будем записывать данные output = self.parser_dict.copy() # записываем имя ЖК и его регион", "list of dicts - if json_file=None :return: json_file - if json_file=True \"\"\" print('Starting", "= self._price_base(item) # записвыем ссылку на план объекта output['plan'] = self._get_image(item) # обновляем", "self.objects_list: # забираем имя ЖК и ссылку на его страницу. 
Добавляем к URL", "цикле проходим по каждому объявлению for item in raw_data: # Бремем копию исходного", "dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\" Метод для получения данных о продаже", "json файл :return: list of dicts - if json_file=None :return: json_file - if", "if not number: continue # Поиск паджинатора на странице pages = self._find_pagination(content) if", "словарь На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :return: list of", "or False \"\"\" pages = data.findAll('a', {'class': 'move-to-page'}) if pages: last_page = int(pages[-2].text)", "int(name.split('-')[0]) if 'апартамент' in name.lower(): result['type'] = 'apartment' else: result['type'] = 'flat' return", "= self.session.get(self.base_url_flats).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на странице pages =", "Метод возвращает список кортежей с именем ЖК и его URL :param url: str", "парковочному месту for item in raw_data: # Бремем копию исходного словаря с ключами", "'view', 'euro_planning', 'sale', 'discount_percent', 'discount', 'comment'] class NdvParser: def __init__(self): self.session = requests.Session()", "помощью регулярного выражения забирает URL :param data: bs4.element.Tag :return: str (image src url)", "Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects = [] raw_data = self.session.get(url).content content = soup(raw_data,", "поиск номера парковочного места raw_number = parking_data.find('meta', {'content': '10'}) if raw_number: number =", "Метод для парсинга данных о парковочном месте :param data: bs4.element.Tag :return: dict \"\"\"", "словаре ключи с данными для корпуса, секции, этажа и номера квартиры output.update(self._get_complex_item(item)) #", "'tile__location'}).get_text().strip() urn = item.select_one('a', {'class': 'tile__name'}).get('href') output.append((name + f'({location})', self.base_url + urn)) 
return", "last_page = int(pages[-2].text) return last_page return False def _get_image(self, data): \"\"\" Метод для", "', '')) except AttributeError: try: price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ',", "return None def _price_base(self, data): \"\"\" Метод для поиска цены квартиры :param data:", "location)) else: objects.extend(self._write_parking_data(content, location)) return objects def get_full_data(self, json_file=None): \"\"\" Метод парсит данные", "elif title == 'этаж': result['floor'] = value elif title == 'номер': result['number'] =", "class_='tile__link js-tile-link') # в цикле проходим по каждому объявлению for item in raw_data:", "parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) price_sale = parking_data.find( 'span', class_='card__info-prices__price card__info-prices--red'", "с данными о квартирах :return: list of dicts \"\"\" # исходный список объектов", "= 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki' self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url) def get_flats_data(self):", "floor = parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError): pass output_dict = { 'number': number, 'building':", "phase except AttributeError: return None def _price_base(self, data): \"\"\" Метод для поиска цены", "= parking_data.find( 'span', class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace(' ', '')) except AttributeError:", "price_base = int(''.join(price_base.split()[:3])) return price_base except AttributeError: return None def _get_complex_item(self, data): \"\"\"", "регион output['complex'] = self._get_complex(item) # записываем очередь строительства 
output['phase'] = self._get_phase(item) # записываем", "= self._get_phase(item) # записываем цену output['price_base'] = self._price_base(item) # записвыем ссылку на план", "= data.find('span', class_='tile__location').get_text(strip=True) complex += f'({location})' return complex except AttributeError: return None def", "complex except AttributeError: return None def _get_phase(self, data): \"\"\" Метод для поиска очереди", "list of dicts \"\"\" objects = [] # Итерируемся по списку ЖК for", "return phase except AttributeError: return None def _price_base(self, data): \"\"\" Метод для поиска", "'')) except AttributeError: try: price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', ''))", "словаря с ключами в который будем записывать данные output = self.parser_dict.copy() # записываем", "продаже парковочных мест Возвращает список словарей с данными о парковочных местах :return: list", "URL :param data: bs4.element.Tag :return: str (image src url) \"\"\" try: plan =", "= None building = None area = None section = None floor =", "def _get_phase(self, data): \"\"\" Метод для поиска очереди строительства :param data: bs4.element.Tag :return:", "# Поиск отдельных объектов парковочных мест на сранице ЖК raw_data = data.find_all('a', class_='flats-table__row", "том числе со скидкой) try: price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ',", "for i in range(1, pages+1): page_url = url + f'?page={i}' raw_data = self.session.get(page_url).content", "of dict \"\"\" result = [] # Поиск отдельных объектов объявлений на странице", "return objects def get_full_data(self, json_file=None): \"\"\" Метод парсит данные о квартирах в новостройках", "floor } return output_dict if __name__ == '__main__': ndv = NdvParser() # Запускаем", "objects = [] raw_data = 
self.session.get(self.base_url_flats).content content = soup(raw_data, 'html.parser') # Поиск паджинатора", "(площаь, корпус, секция, этаж, план) output.update(self._get_parking_info(item)) # добавляем словарь в список который будем", "parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number') # парсинг площади try: raw_area =", "plan except AttributeError: return None def _get_complex(self, data): \"\"\" Метод для поиска имени", "'building', 'section', 'price_base', 'price_finished', 'price_sale', 'price_finished_sale', 'area', 'number', 'number_on_site', 'rooms', 'floor', 'in_sale', 'sale_status',", "result = [] # Поиск отдельных объектов парковочных мест на сранице ЖК raw_data", "price_sale = None building = None area = None section = None floor", "output['complex'] = location # записываем данные о парковочном месте (площаь, корпус, секция, этаж,", "pass # парсинг корпуса try: building = parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError): pass #", "output.append((name + f'({location})', self.base_url + urn)) return output def _find_pagination(self, data): \"\"\" Функция", "данный момент 0 предложений, берем следующий ЖК if not number: continue # Поиск", "данные о парковочных местах Записывает полученные данные в json файл :return: list of", "page number or False \"\"\" pages = data.findAll('a', {'class': 'move-to-page'}) if pages: last_page", "except AttributeError: return None def _price_base(self, data): \"\"\" Метод для поиска цены квартиры", "'') else: try: number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError: pass # поиск ссылки", "# Бремем копию исходного словаря с ключами в который будем записывать данные output", "странице pages = self._find_pagination(content) if pages: for i in range(1, pages + 1):", "soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список 
objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content, location)) return", "raw_data = self.session.get(self.base_url_flats).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на странице pages", "_find_pagination(self, data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. Производит поиск пагинатора.", "'euro_planning', 'sale', 'discount_percent', 'discount', 'comment'] class NdvParser: def __init__(self): self.session = requests.Session() self.base_url", "= soup(raw_data, 'html.parser') # Поиск кнопки <<Показать n предложений>>. Поиск кол-ва предложений о", "добавляем(объединяем) в исходный список objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content, location)) return objects def get_full_data(self,", "месту for item in raw_data: # Бремем копию исходного словаря с ключами в", "list of dicts \"\"\" result = [] # Поиск отдельных объектов парковочных мест", "title == 'корпус': result['building'] = value elif title == 'секция': result['section'] = value", "(AttributeError, IndexError): pass output_dict = { 'number': number, 'building': building, 'area': area, 'price_sale':", "= data.find_all('div', {'class': 'tile__content'}) for item in raw_data: name = item.select_one('a', {'class': 'tile__name'}).text.strip()", "кол-ва предложений о продаже row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)', row).group()) #", "raw_number.previous.strip().split()[1].replace('№', '') else: try: number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError: pass # поиск", "'html.parser') # Поиск кнопки <<Показать n предложений>>. 
Поиск кол-ва предложений о продаже row", "i in range(1, pages + 1): # добавляем ?page=n к URL page_url =", "(AttributeError, IndexError): pass # парсинг этажа try: floor = parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError):", "= self.get_flats_data() parking = self.get_parking_data() data_result = flats + parking if json_file is", "'number_on_site', 'rooms', 'floor', 'in_sale', 'sale_status', 'finished', 'currency', 'ceil', 'article', 'finishing_name', 'furniture', 'furniture_price', 'plan',", "result def _get_parking_info(self, data): \"\"\" Метод для парсинга данных о парковочном месте :param", "in raw_data: name = item.select_one('a', {'class': 'tile__name'}).text.strip() location = item.find('span', {'class': 'tile__location'}).get_text().strip() urn", "новостройках + данные о парковочных местах Записывает полученные данные в json файл :return:", "(image src url) \"\"\" try: plan = data.find('div', class_='tile__image')['data-deskstop'] plan = re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url')", "except AttributeError: return None def _get_complex(self, data): \"\"\" Метод для поиска имени ЖК", "building = parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг секции try: section =", "предложений>>. 
Поиск кол-ва предложений о продаже row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number = int(re.search('(?P<number>\\d+)',", "этажа и номера квартиры output.update(self._get_complex_item(item)) # обновляем в словаре ключи с данными для", "None price_sale = None building = None area = None section = None", "parking_data.find( 'span', class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace(' ', '')) except AttributeError: pass", "На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :return: list of dict", "area, 'price_sale': price_sale, 'price_base': price_base, 'type': 'parking', 'plan': plan_img, 'section': section, 'floor': floor", "parking if json_file is None: return data_result else: with open('ndv_ru.json', 'w') as file:", "self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._get_objects(content)) else: objects", "= self.session.get(url).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на странице pages =", "for item in raw_data: name = item.select_one('a', {'class': 'tile__name'}).text.strip() location = item.find('span', {'class':", "if 'апартамент' in name.lower(): result['type'] = 'apartment' else: result['type'] = 'flat' return result", "parking_url = self.base_url + urn parking_data = soup(self.session.get(parking_url).content, 'html.parser') # поиск номера парковочного", "ЖК и его URL :param url: str :return: list of tuples [('Мкр. 
«Мегаполис»(Москва,", "title == 'этаж': result['floor'] = value elif title == 'номер': result['number'] = value", "= self.new_buildings_url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем)", "try: phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True) return phase except AttributeError: return None def _price_base(self,", "= { 'number': number, 'building': building, 'area': area, 'price_sale': price_sale, 'price_base': price_base, 'type':", "self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\" Метод для получения данных", "# Итерируемся по списку ЖК for item in self.objects_list: # забираем имя ЖК", "AttributeError: return None def _get_complex(self, data): \"\"\" Метод для поиска имени ЖК и", "корпус, секция, этаж, план) output.update(self._get_parking_info(item)) # добавляем словарь в список который будем возвращать", "= [] raw_data = self.session.get(self.base_url_flats).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на", "= self._get_objects(content) return objects def _get_objects(self, data): \"\"\" Функция принимает на вход объект", "json_file=True \"\"\" print('Starting data parsing...') flats = self.get_flats_data() parking = self.get_parking_data() data_result =", "for item in info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True) if title ==", "= parking_data.find('div', {'id': 'plans_layout'}) plan_img = plan_div.find('img').get('src') except AttributeError: pass # поиск цены", "1): # добавляем ?page=n к URL page_url = self.new_buildings_url + f'?page={i}' raw_data =", "None price_base = None price_sale = None building = None area = None", "квартирах в новостройках + данные о парковочных местах Записывает полученные данные в json", 
"возвращать result.append(output) return result def _get_parking_info(self, data): \"\"\" Метод для парсинга данных о", ":return: str (image src url) \"\"\" try: plan = data.find('div', class_='tile__image')['data-deskstop'] plan =", "answer = self.session.get(url) # проверка есть ли в продаже парковочне места. Если нет,", "= 'apartment' else: result['type'] = 'flat' return result def _write_flats_data(self, data): \"\"\" Метод", "data parsing...') flats = self.get_flats_data() parking = self.get_parking_data() data_result = flats + parking", "\"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. Ищет название жк, регион и", "output = self.parser_dict.copy() # записываем имя ЖК и его регион output['complex'] = self._get_complex(item)", "забираем имя ЖК и ссылку на его страницу. Добавляем к URL /parking location,", "tile__image. С помощью регулярного выражения забирает URL :param data: bs4.element.Tag :return: str (image", "table-body--row') # в цикле проходим по каждому парковочному месту for item in raw_data:", "возвращает список кортежей с именем ЖК и его URL :param url: str :return:", "get_full_data(self, json_file=None): \"\"\" Метод парсит данные о квартирах в новостройках + данные о", "parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) except AttributeError: try: price_base = parking_data.find('span',", "for item in raw_data: # Бремем копию исходного словаря с ключами в который", "'span', class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace(' ', '')) except AttributeError: pass #", "parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',', '.')) except (AttributeError, IndexError): pass # парсинг корпуса try:", "self.objects_list = self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\" Метод для получения данных о продаже 
квартир", "data): \"\"\" Метод для записи данных о отдельной квартире в словарь На вход", "и его URL :param url: str :return: list of tuples [('Мкр. «Мегаполис»(Москва, ВАО,", "в цикле проходим по каждому объявлению for item in raw_data: # Бремем копию", "pass # поиск ссылки на план try: plan_div = parking_data.find('div', {'id': 'plans_layout'}) plan_img", "IndexError): pass # парсинг этажа try: floor = parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError): pass", "<reponame>nonameists/puls_test import json import re import requests from bs4 import BeautifulSoup as soup", "output['complex'] = self._get_complex(item) # записываем очередь строительства output['phase'] = self._get_phase(item) # записываем цену", "объектов который будем возвращать objects = [] raw_data = self.session.get(self.base_url_flats).content content = soup(raw_data,", "pages = self._find_pagination(content) if pages: for i in range(1, pages+1): page_url = url", "файл :return: list of dicts - if json_file=None :return: json_file - if json_file=True", "page_url = self.base_url_flats + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') #", ":return: list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output =", "странице pages = self._find_pagination(content) if pages: for i in range(1, pages+1): page_url =", "content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content,", "None def _get_complex_item(self, data): \"\"\" Метод для поиска информации о квартире Поиск корпуса,", "data): \"\"\" Метод для поиска имени ЖК и его региона :param data: bs4.element.Tag", "f'({location})', self.base_url + urn)) return output def _find_pagination(self, data): \"\"\" Функция принимает на", "каждому парковочному месту for item in raw_data: # Бремем копию исходного словаря с", "на вход объект класса bs4.BeautifulSoup. Ищет название жк, регион и ссылку на объект", "схемы квартиры На вход принимает bs4.element.Tag. Производит поиск по div классу tile__image. С", "\"\"\" try: price_base = data.find('span', class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3])) return price_base except AttributeError:", "нет, берем следующий ЖК if answer.status_code == 404: continue raw_data = answer.content content", ":param data: bs4.element.Tag :return: dict \"\"\" keys = ('section', 'floor', 'number', 'building') result", "dict \"\"\" keys = ('section', 'floor', 'number', 'building') result = dict.fromkeys(keys) info =", "квартире в словарь На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :return:", "парковочном месте :param data: bs4.element.Tag :return: dict \"\"\" plan_img = None price_base =", "'студия' in name.split()[0].lower(): result['rooms'] = 'studio' else: result['rooms'] = int(name.split('-')[0]) if 'апартамент' in", "tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects = [] raw_data =", "'/img/new-design/no-image.svg': return None return plan except AttributeError: return None def _get_complex(self, data): \"\"\"", "data.find('span', class_='tile__location').get_text(strip=True) complex += f'({location})' return complex except AttributeError: return None def _get_phase(self,", "str :return: list of tuples [('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects", "Метод производит поиск кол-ва комнат в квартире, площади + определение типа апартаменты/квартира :param", "self.parser_dict.copy() # записываем имя ЖК и регион output['complex'] = location # записываем данные", "\"\"\" Метод для поиска цены квартиры :param data: bs4.element.Tag :return: str \"\"\" try:", "кнопки <<Показать n предложений>>. Поиск кол-ва предложений о продаже row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True)", "building, 'area': area, 'price_sale': price_sale, 'price_base': price_base, 'type': 'parking', 'plan': plan_img, 'section': section,", "IndexError): pass # парсинг корпуса try: building = parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError): pass", "= [] raw_data = self.session.get(url).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на", "dict.fromkeys(keys) info = data.find_all('div', class_='tile__in-complex-item') for item in info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value", "предложений, берем следующий ЖК if not number: continue # Поиск паджинатора на странице", "квартиры На вход принимает bs4.element.Tag. Производит поиск по div классу tile__image. 
С помощью", "цены квартиры :param data: bs4.element.Tag :return: str \"\"\" try: price_base = data.find('span', class_='tile__price').get_text(strip=True)", "continue # Поиск паджинатора на странице pages = self._find_pagination(content) if pages: for i", "квартиры output.update(self._get_complex_item(item)) # обновляем в словаре ключи с данными для комнат в квартире,", "+= '/parking' answer = self.session.get(url) # проверка есть ли в продаже парковочне места.", "def _get_dimentions(self, data): \"\"\" Метод производит поиск кол-ва комнат в квартире, площади +", "в данный момент 0 предложений, берем следующий ЖК if not number: continue #", "регион output['complex'] = location # записываем данные о парковочном месте (площаь, корпус, секция,", "= dict() name = data.find('a', {'class': 'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.')) if", "data: bs4.BeautifulSoup :return: int last page number or False \"\"\" pages = data.findAll('a',", "value elif title == 'номер': result['number'] = value return result def _get_dimentions(self, data):", "= [] # Поиск отдельных объектов объявлений на странице raw_data = data.find_all('div', class_='tile__link", "+ urn parking_data = soup(self.session.get(parking_url).content, 'html.parser') # поиск номера парковочного места raw_number =", "о квартирах в новостройках + данные о парковочных местах Записывает полученные данные в", "+ данные о парковочных местах Записывает полученные данные в json файл :return: list", "отдельном парковочном месте На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :param", ":return: json_file - if json_file=True \"\"\" print('Starting data parsing...') flats = self.get_flats_data() parking", "self._price_base(item) # записвыем ссылку на план объекта output['plan'] = self._get_image(item) # обновляем в", "ЖК и регион output['complex'] = location # записываем данные о парковочном месте (площаь,", "data.find('span', 
class_='tile__row--resale_date').get_text(strip=True) return phase except AttributeError: return None def _price_base(self, data): \"\"\" Метод", "список objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content, location)) return objects def get_full_data(self, json_file=None): \"\"\" Метод", "Поиск корпуса, секции, этажа и номера квартиры Возвращает словарь с ключами ['section', 'floor',", "file: json.dump(data_result, file) print('Success') def _get_new_buildings(self, url): \"\"\" Метод возвращает список кортежей с", "soup(raw_data, 'html.parser') # Поиск кнопки <<Показать n предложений>>. Поиск кол-ва предложений о продаже", "# парсинг корпуса try: building = parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг", "urn)) return output def _find_pagination(self, data): \"\"\" Функция принимает на вход объект класса", "result def _write_parking_data(self, data, location): \"\"\" Метод для записи данных о отдельном парковочном", "= item url += '/parking' answer = self.session.get(url) # проверка есть ли в", "= int(pages[-2].text) return last_page return False def _get_image(self, data): \"\"\" Метод для парсинга", "= data.find_all('div', class_='tile__in-complex-item') for item in info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True)", "класса bs4.BeautifulSoup. Производит поиск пагинатора. 
Если он есть то возвращает номер последней страницы.", "берем следующий ЖК if not number: continue # Поиск паджинатора на странице pages", ":param data: bs4.element.Tag :return: str \"\"\" try: complex = data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link'", "raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_parking_data(content,", "if pages: for i in range(1, pages+1): page_url = url + f'?page={i}' raw_data", "Метод для записи данных о отдельной квартире в словарь На вход принимает объект", "_get_dimentions(self, data): \"\"\" Метод производит поиск кол-ва комнат в квартире, площади + определение", "bs4.element.Tag :return: str \"\"\" try: complex = data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True", "title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True) if title == 'корпус': result['building'] = value", "его URL :param url: str :return: list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская", "objects.extend(self._get_objects(content)) else: objects = self._get_objects(content) return objects def _get_objects(self, data): \"\"\" Функция принимает", "именем ЖК и его URL :param url: str :return: list of tuples [('Мкр.", "price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) price_sale = parking_data.find( 'span',", "str \"\"\" try: phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True) return phase except AttributeError: return None", "с данными для корпуса, секции, этажа и номера квартиры output.update(self._get_complex_item(item)) # обновляем в", "return result def _get_parking_info(self, data): \"\"\" Метод для парсинга данных о парковочном месте", "'tile__name'}).text.strip() location = item.find('span', {'class': 'tile__location'}).get_text().strip() urn = item.select_one('a', {'class': 'tile__name'}).get('href') output.append((name +", "result def _get_dimentions(self, data): \"\"\" Метод производит поиск кол-ва комнат в квартире, площади", "result = dict.fromkeys(keys) info = data.find_all('div', class_='tile__in-complex-item') for item in info: title =", "data): \"\"\" Метод для поиска цены квартиры :param data: bs4.element.Tag :return: str \"\"\"", "'корпус': result['building'] = value elif title == 'секция': result['section'] = value elif title", "для поиска имени ЖК и его региона :param data: bs4.element.Tag :return: str \"\"\"", "\"\"\" Метод для записи данных о отдельном парковочном месте На вход принимает объект", "bs4.element.Tag :return: str (image src url) \"\"\" try: plan = data.find('div', class_='tile__image')['data-deskstop'] plan", "raw_data: name = item.select_one('a', {'class': 'tile__name'}).text.strip() location = item.find('span', {'class': 'tile__location'}).get_text().strip() urn =", "типа квартиры output.update(self._get_dimentions(item)) # добавляем словарь в 
список который будем возвращать result.append(output) return", "данные output = self.parser_dict.copy() # записываем имя ЖК и регион output['complex'] = location", "парковочных местах Записывает полученные данные в json файл :return: list of dicts -", "ключами в который будем записывать данные output = self.parser_dict.copy() # записываем имя ЖК", "о отдельном парковочном месте На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup", "i in range(1, pages+1): page_url = self.base_url_flats + f'?page={i}' raw_data = self.session.get(page_url).content content", "= data.get('href') parking_url = self.base_url + urn parking_data = soup(self.session.get(parking_url).content, 'html.parser') # поиск", "None def _price_base(self, data): \"\"\" Метод для поиска цены квартиры :param data: bs4.element.Tag", "data: bs4.element.Tag :return: str \"\"\" try: phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True) return phase except", "class_='tile__row--resale_date').get_text(strip=True) return phase except AttributeError: return None def _price_base(self, data): \"\"\" Метод для", "ссылку на объект ЖК :param data: bs4.BeautifulSoup :return: list of tuples [('Мкр. 
«Мегаполис»(Москва,", "данные о парковочном месте (площаь, корпус, секция, этаж, план) output.update(self._get_parking_info(item)) # добавляем словарь", "int(re.search('(?P<number>\\d+)', row).group()) # Если страница есть, но в данный момент 0 предложений, берем", "[] # Поиск отдельных объектов парковочных мест на сранице ЖК raw_data = data.find_all('a',", ":param data: bs4.element.Tag :return: dict \"\"\" result = dict() name = data.find('a', {'class':", "return result def _write_parking_data(self, data, location): \"\"\" Метод для записи данных о отдельном", "'plan', 'feature', 'view', 'euro_planning', 'sale', 'discount_percent', 'discount', 'comment'] class NdvParser: def __init__(self): self.session", "_get_parking_info(self, data): \"\"\" Метод для парсинга данных о парковочном месте :param data: bs4.element.Tag", "ЖК и его региона :param data: bs4.element.Tag :return: str \"\"\" try: complex =", "content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._get_objects(content)) else: objects =", "result['rooms'] = 'studio' else: result['rooms'] = int(name.split('-')[0]) if 'апартамент' in name.lower(): result['type'] =", "def _find_pagination(self, data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. 
Производит поиск", "'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki' self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url)", "AttributeError: pass # поиск ссылки на план try: plan_div = parking_data.find('div', {'id': 'plans_layout'})", "file) print('Success') def _get_new_buildings(self, url): \"\"\" Метод возвращает список кортежей с именем ЖК", "'parking', 'plan': plan_img, 'section': section, 'floor': floor } return output_dict if __name__ ==", "== 'секция': result['section'] = value elif title == 'этаж': result['floor'] = value elif", "re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if plan == '/img/new-design/no-image.svg': return None return plan except AttributeError: return", "= data.find('a', {'class': 'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.')) if 'студия' in name.split()[0].lower():", "новостройках Возвращает список словарей с данными о квартирах :return: list of dicts \"\"\"", "'plan': plan_img, 'section': section, 'floor': floor } return output_dict if __name__ == '__main__':", "number, 'building': building, 'area': area, 'price_sale': price_sale, 'price_base': price_base, 'type': 'parking', 'plan': plan_img,", "phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True) return phase except AttributeError: return None def _price_base(self, data):", "со скидкой) try: price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) except", "и номера квартиры output.update(self._get_complex_item(item)) # обновляем в словаре ключи с данными для комнат", "re import requests from bs4 import BeautifulSoup as soup DICT_KEYS = ['complex', 'type',", "= self.base_url + urn parking_data = 
soup(self.session.get(parking_url).content, 'html.parser') # поиск номера парковочного места", "floor = None number = None urn = data.get('href') parking_url = self.base_url +", "section, 'floor': floor } return output_dict if __name__ == '__main__': ndv = NdvParser()", ":return: list of dicts \"\"\" result = [] # Поиск отдельных объектов парковочных", "{'class': 'tile__content'}) for item in raw_data: name = item.select_one('a', {'class': 'tile__name'}).text.strip() location =", ":return: str \"\"\" try: price_base = data.find('span', class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3])) return price_base", "корпуса try: building = parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг секции try:", "content = soup(raw_data, 'html.parser') # Поиск паджинатора на странице pages = self._find_pagination(content) if", "план try: plan_div = parking_data.find('div', {'id': 'plans_layout'}) plan_img = plan_div.find('img').get('src') except AttributeError: pass", "Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output = [] raw_data = data.find_all('div', {'class': 'tile__content'}) for", "кол-ва комнат в квартире, площади + определение типа апартаменты/квартира :param data: bs4.element.Tag :return:", "complex += f'({location})' return complex except AttributeError: return None def _get_phase(self, data): \"\"\"", "last page number or False \"\"\" pages = data.findAll('a', {'class': 'move-to-page'}) if pages:", "data): \"\"\" Метод для парсинга схемы квартиры На вход принимает bs4.element.Tag. 
Производит поиск", "секции, этажа и номера квартиры Возвращает словарь с ключами ['section', 'floor', 'number', 'building']", "if title == 'корпус': result['building'] = value elif title == 'секция': result['section'] =", "url): \"\"\" Метод возвращает список кортежей с именем ЖК и его URL :param", "of dicts \"\"\" objects = [] # Итерируемся по списку ЖК for item", "print('Success') def _get_new_buildings(self, url): \"\"\" Метод возвращает список кортежей с именем ЖК и", "данные output = self.parser_dict.copy() # записываем имя ЖК и его регион output['complex'] =", "Поиск отдельных объектов объявлений на странице raw_data = data.find_all('div', class_='tile__link js-tile-link') # в", "Функция принимает на вход объект класса bs4.BeautifulSoup. Ищет название жк, регион и ссылку", "data_result else: with open('ndv_ru.json', 'w') as file: json.dump(data_result, file) print('Success') def _get_new_buildings(self, url):", "'номер': result['number'] = value return result def _get_dimentions(self, data): \"\"\" Метод производит поиск", "list of dicts \"\"\" # исходный список объектов который будем возвращать objects =", "number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError: pass # поиск ссылки на план try:", "продаже парковочне места. 
Если нет, берем следующий ЖК if answer.status_code == 404: continue", "= data.find('div', class_='tile__image')['data-deskstop'] plan = re.search(\"url\\('(?P<url>\\S+)'\\)\", plan).group('url') if plan == '/img/new-design/no-image.svg': return None", "== 'корпус': result['building'] = value elif title == 'секция': result['section'] = value elif", "паджинатора на странице pages = self._find_pagination(content) if pages: for i in range(1, pages+1):", "objects.extend(self._write_parking_data(content, location)) return objects def get_full_data(self, json_file=None): \"\"\" Метод парсит данные о квартирах", "'area', 'number', 'number_on_site', 'rooms', 'floor', 'in_sale', 'sale_status', 'finished', 'currency', 'ceil', 'article', 'finishing_name', 'furniture',", "in name.split()[0].lower(): result['rooms'] = 'studio' else: result['rooms'] = int(name.split('-')[0]) if 'апартамент' in name.lower():", "data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. Ищет название жк, регион", "добавляем словарь в список который будем возвращать result.append(output) return result def _write_parking_data(self, data,", "NdvParser: def __init__(self): self.session = requests.Session() self.base_url = 'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url", "следующий ЖК if answer.status_code == 404: continue raw_data = answer.content content = soup(raw_data,", "парсинг данных о парковочном месте(метраж, копус, секцияб этаж) parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings')", "objects = [] # Итерируемся по списку ЖК for item in self.objects_list: #", "Метод для поиска цены квартиры :param data: bs4.element.Tag :return: str \"\"\" try: price_base", "output_dict if __name__ == '__main__': ndv = NdvParser() # Запускаем парсер на квартиры", "апартаменты/квартира :param data: bs4.element.Tag :return: dict \"\"\" result = dict() name = data.find('a',", "= 
raw_number.previous.strip().split()[1].replace('№', '') else: try: number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError: pass #", "корпуса, секции, этажа и номера квартиры output.update(self._get_complex_item(item)) # обновляем в словаре ключи с", "[('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects = [] raw_data = self.session.get(url).content", "Метод для парсинга схемы квартиры На вход принимает bs4.element.Tag. Производит поиск по div", "к URL page_url = self.new_buildings_url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data,", "IndexError): pass # парсинг секции try: section = parking_div_data[2].get_text(strip=True) except (AttributeError, IndexError): pass", "\"\"\" Метод для парсинга схемы квартиры На вход принимает bs4.element.Tag. Производит поиск по", "'furniture', 'furniture_price', 'plan', 'feature', 'view', 'euro_planning', 'sale', 'discount_percent', 'discount', 'comment'] class NdvParser: def", "имя ЖК и регион output['complex'] = location # записываем данные о парковочном месте", "объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :return: list of dict \"\"\" result =", "по div классу tile__image. С помощью регулярного выражения забирает URL :param data: bs4.element.Tag", "info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True) if title == 'корпус': result['building'] =", "типа апартаменты/квартира :param data: bs4.element.Tag :return: dict \"\"\" result = dict() name =", "ссылку на план объекта output['plan'] = self._get_image(item) # обновляем в словаре ключи с", "None def _get_complex(self, data): \"\"\" Метод для поиска имени ЖК и его региона", "= NdvParser() # Запускаем парсер на квартиры и машиноместа. 
# Данные записываются в", "result = [] # Поиск отдельных объектов объявлений на странице raw_data = data.find_all('div',", "который будем возвращать result.append(output) return result def _write_parking_data(self, data, location): \"\"\" Метод для", "\"\"\" Метод парсит данные о квартирах в новостройках + данные о парковочных местах", "objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content, location)) return objects def get_full_data(self, json_file=None): \"\"\" Метод парсит", "принимает на вход объект класса bs4.BeautifulSoup. Ищет название жк, регион и ссылку на", "of dicts \"\"\" result = [] # Поиск отдельных объектов парковочных мест на", "if pages: last_page = int(pages[-2].text) return last_page return False def _get_image(self, data): \"\"\"", "получения данных о продаже квартир в новостройках Возвращает список словарей с данными о", "= 'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki' self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list =", "pages: last_page = int(pages[-2].text) return last_page return False def _get_image(self, data): \"\"\" Метод", "в новостройках Возвращает список словарей с данными о квартирах :return: list of dicts", "в json файл :return: list of dicts - if json_file=None :return: json_file -", "soup DICT_KEYS = ['complex', 'type', 'phase', 'building', 'section', 'price_base', 'price_finished', 'price_sale', 'price_finished_sale', 'area',", "в который будем записывать данные output = self.parser_dict.copy() # записываем имя ЖК и", "«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects = [] raw_data = self.session.get(url).content content", "n предложений>>. 
Поиск кол-ва предложений о продаже row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True) number =", "parking_div_data[3].get_text(strip=True) except (AttributeError, IndexError): pass output_dict = { 'number': number, 'building': building, 'area':", "есть, но в данный момент 0 предложений, берем следующий ЖК if not number:", "', '')) price_sale = parking_data.find( 'span', class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace(' ',", "plan_img = plan_div.find('img').get('src') except AttributeError: pass # поиск цены (в том числе со", "area = None section = None floor = None number = None urn", "данных о парковочном месте(метраж, копус, секцияб этаж) parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data", "self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki' self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list = self._get_new_buildings(self.new_buildings_url) def", "'number': number, 'building': building, 'area': area, 'price_sale': price_sale, 'price_base': price_base, 'type': 'parking', 'plan':", "план) output.update(self._get_parking_info(item)) # добавляем словарь в список который будем возвращать result.append(output) return result", "# парсинг данных о парковочном месте(метраж, копус, секцияб этаж) parking_div_info = parking_data.find('div', class_='card__info-row", "_price_base(self, data): \"\"\" Метод для поиска цены квартиры :param data: bs4.element.Tag :return: str", "вход принимает bs4.element.Tag. Производит поиск по div классу tile__image. 
С помощью регулярного выражения", "data.find('span', class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3])) return price_base except AttributeError: return None def _get_complex_item(self,", "{'class': 'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.')) if 'студия' in name.split()[0].lower(): result['rooms'] =", "URL :param url: str :return: list of tuples [('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица", "data: bs4.element.Tag :return: str (image src url) \"\"\" try: plan = data.find('div', class_='tile__image')['data-deskstop']", "value = item.select_one('.tile__in-complex-value').get_text(strip=True) if title == 'корпус': result['building'] = value elif title ==", "page_url = self.new_buildings_url + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') #", "# записвыем ссылку на план объекта output['plan'] = self._get_image(item) # обновляем в словаре", "\"\"\" Метод для поиска очереди строительства :param data: bs4.element.Tag :return: str \"\"\" try:", "+ f'({location})', self.base_url + urn)) return output def _find_pagination(self, data): \"\"\" Функция принимает", "content = soup(raw_data, 'html.parser') # Поиск кнопки <<Показать n предложений>>. 
Поиск кол-ва предложений", "\"\"\" keys = ('section', 'floor', 'number', 'building') result = dict.fromkeys(keys) info = data.find_all('div',", "def _get_new_buildings(self, url): \"\"\" Метод возвращает список кортежей с именем ЖК и его", "location # записываем данные о парковочном месте (площаь, корпус, секция, этаж, план) output.update(self._get_parking_info(item))", "цикле проходим по каждому парковочному месту for item in raw_data: # Бремем копию", "(в том числе со скидкой) try: price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace('", "bs4.element.Tag :return: str \"\"\" try: price_base = data.find('span', class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3])) return", "'building'] :param data: bs4.element.Tag :return: dict \"\"\" keys = ('section', 'floor', 'number', 'building')", "return None def _get_phase(self, data): \"\"\" Метод для поиска очереди строительства :param data:", "для получения данных о продаже квартир в новостройках Возвращает список словарей с данными", "[] raw_data = self.session.get(url).content content = soup(raw_data, 'html.parser') # Поиск паджинатора на странице", "площади + определение типа апартаменты/квартира :param data: bs4.element.Tag :return: dict \"\"\" result =", "plan_div = parking_data.find('div', {'id': 'plans_layout'}) plan_img = plan_div.find('img').get('src') except AttributeError: pass # поиск", "location, url = item url += '/parking' answer = self.session.get(url) # проверка есть", "= parking_div_info.find_all('div', class_='card__info-params__number') # парсинг площади try: raw_area = parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',',", "парковочном месте (площаь, корпус, секция, этаж, план) output.update(self._get_parking_info(item)) # добавляем словарь в список", "'feature', 'view', 'euro_planning', 'sale', 'discount_percent', 'discount', 
'comment'] class NdvParser: def __init__(self): self.session =", "'number', 'number_on_site', 'rooms', 'floor', 'in_sale', 'sale_status', 'finished', 'currency', 'ceil', 'article', 'finishing_name', 'furniture', 'furniture_price',", "= 'studio' else: result['rooms'] = int(name.split('-')[0]) if 'апартамент' in name.lower(): result['type'] = 'apartment'", ") location = data.find('span', class_='tile__location').get_text(strip=True) complex += f'({location})' return complex except AttributeError: return", "для записи данных о отдельной квартире в словарь На вход принимает объект класса", "# проверка есть ли в продаже парковочне места. Если нет, берем следующий ЖК", "of dicts - if json_file=None :return: json_file - if json_file=True \"\"\" print('Starting data", "AttributeError: return None def _get_phase(self, data): \"\"\" Метод для поиска очереди строительства :param", "= ('section', 'floor', 'number', 'building') result = dict.fromkeys(keys) info = data.find_all('div', class_='tile__in-complex-item') for", "dict() name = data.find('a', {'class': 'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.')) if 'студия'", "+ f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный", "pages = data.findAll('a', {'class': 'move-to-page'}) if pages: last_page = int(pages[-2].text) return last_page return", "= soup(raw_data, 'html.parser') # Поиск паджинатора на странице pages = self._find_pagination(content) if pages:", "with open('ndv_ru.json', 'w') as file: json.dump(data_result, file) print('Success') def _get_new_buildings(self, url): \"\"\" Метод", "Итерируемся по списку ЖК for item in self.objects_list: # забираем имя ЖК и", "'building') result = dict.fromkeys(keys) info = data.find_all('div', class_='tile__in-complex-item') for item in info: title", "None section = None floor = None number = None urn = data.get('href')", "для поиска 
информации о квартире Поиск корпуса, секции, этажа и номера квартиры Возвращает", "который будем записывать данные output = self.parser_dict.copy() # записываем имя ЖК и регион", "url += '/parking' answer = self.session.get(url) # проверка есть ли в продаже парковочне", "поиска цены квартиры :param data: bs4.element.Tag :return: str \"\"\" try: price_base = data.find('span',", "'section': section, 'floor': floor } return output_dict if __name__ == '__main__': ndv =", "= value return result def _get_dimentions(self, data): \"\"\" Метод производит поиск кол-ва комнат", "секции try: section = parking_div_data[2].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг этажа try:", "по списку ЖК for item in self.objects_list: # забираем имя ЖК и ссылку", "квартиры Возвращает словарь с ключами ['section', 'floor', 'number', 'building'] :param data: bs4.element.Tag :return:", "output.update(self._get_dimentions(item)) # добавляем словарь в список который будем возвращать result.append(output) return result def", "о квартирах :return: list of dicts \"\"\" # исходный список объектов который будем", "output.update(self._get_complex_item(item)) # обновляем в словаре ключи с данными для комнат в квартире, площади", "который будем возвращать objects = [] raw_data = self.session.get(self.base_url_flats).content content = soup(raw_data, 'html.parser')", "parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError: pass # поиск ссылки на план try: plan_div =", "def _price_base(self, data): \"\"\" Метод для поиска цены квартиры :param data: bs4.element.Tag :return:", "section = None floor = None number = None urn = data.get('href') parking_url", "добавляем ?page=n к URL page_url = self.new_buildings_url + f'?page={i}' raw_data = self.session.get(page_url).content content", "AttributeError: return None def _get_complex_item(self, data): \"\"\" Метод для поиска информации о квартире", "= soup(raw_data, 'html.parser') # 
добавляем(объединяем) в исходный список objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content, location))", "return None return plan except AttributeError: return None def _get_complex(self, data): \"\"\" Метод", "item.select_one('a', {'class': 'tile__name'}).get('href') output.append((name + f'({location})', self.base_url + urn)) return output def _find_pagination(self,", "обновляем в словаре ключи с данными для корпуса, секции, этажа и номера квартиры", "json import re import requests from bs4 import BeautifulSoup as soup DICT_KEYS =", "price_base = int(price_base.split('руб.')[0].replace(' ', '')) price_sale = parking_data.find( 'span', class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale", "pass # поиск цены (в том числе со скидкой) try: price_base = parking_data.find('span',", "словарь с ключами ['section', 'floor', 'number', 'building'] :param data: bs4.element.Tag :return: dict \"\"\"", "if answer.status_code == 404: continue raw_data = answer.content content = soup(raw_data, 'html.parser') #", "список objects.extend(self._write_flats_data(content)) else: objects = self._write_flats_data(content) return objects def get_parking_data(self): \"\"\" Метод для", "'sale', 'discount_percent', 'discount', 'comment'] class NdvParser: def __init__(self): self.session = requests.Session() self.base_url =", "str \"\"\" try: price_base = data.find('span', class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3])) return price_base except", "pages + 1): # добавляем ?page=n к URL page_url = self.new_buildings_url + f'?page={i}'", "data: bs4.element.Tag :return: dict \"\"\" plan_img = None price_base = None price_sale =", "квартирах :return: list of dicts \"\"\" # исходный список объектов который будем возвращать", "данные в json файл :return: list of dicts - if json_file=None :return: json_file", "на его страницу. 
Добавляем к URL /parking location, url = item url +=", "return result def _write_flats_data(self, data): \"\"\" Метод для записи данных о отдельной квартире", "о продаже парковочных мест Возвращает список словарей с данными о парковочных местах :return:", "в словаре ключи с данными для корпуса, секции, этажа и номера квартиры output.update(self._get_complex_item(item))", "objects = self._get_objects(content) return objects def _get_objects(self, data): \"\"\" Функция принимает на вход", "'studio' else: result['rooms'] = int(name.split('-')[0]) if 'апартамент' in name.lower(): result['type'] = 'apartment' else:", "'in_sale', 'sale_status', 'finished', 'currency', 'ceil', 'article', 'finishing_name', 'furniture', 'furniture_price', 'plan', 'feature', 'view', 'euro_planning',", "get_flats_data(self): \"\"\" Метод для получения данных о продаже квартир в новостройках Возвращает список", "парсинга данных о парковочном месте :param data: bs4.element.Tag :return: dict \"\"\" plan_img =", "следующий ЖК if not number: continue # Поиск паджинатора на странице pages =", "data: bs4.element.Tag :return: dict \"\"\" keys = ('section', 'floor', 'number', 'building') result =", "'phase', 'building', 'section', 'price_base', 'price_finished', 'price_sale', 'price_finished_sale', 'area', 'number', 'number_on_site', 'rooms', 'floor', 'in_sale',", "class_='card__info-params__number') # парсинг площади try: raw_area = parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',', '.')) except", "raw_number: number = raw_number.previous.strip().split()[1].replace('№', '') else: try: number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError:", "return result def _get_dimentions(self, data): \"\"\" Метод производит поиск кол-ва комнат в квартире,", "= self.parser_dict.copy() # записываем имя ЖК и его регион output['complex'] = self._get_complex(item) #", "except (AttributeError, IndexError): pass output_dict = { 
'number': number, 'building': building, 'area': area,", "= [] # Итерируемся по списку ЖК for item in self.objects_list: # забираем", "plan_img = None price_base = None price_sale = None building = None area", "content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_flats_data(content)) else: objects =", "_write_flats_data(self, data): \"\"\" Метод для записи данных о отдельной квартире в словарь На", "building = None area = None section = None floor = None number", "ссылку на его страницу. Добавляем к URL /parking location, url = item url", "'floor': floor } return output_dict if __name__ == '__main__': ndv = NdvParser() #", "в исходный список objects.extend(self._write_flats_data(content)) else: objects = self._write_flats_data(content) return objects def get_parking_data(self): \"\"\"", "словаре ключи с данными для комнат в квартире, площади + типа квартиры output.update(self._get_dimentions(item))", "исходный список objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content, location)) return objects def get_full_data(self, json_file=None): \"\"\"", "пагинатора. Если он есть то возвращает номер последней страницы. :param data: bs4.BeautifulSoup :return:", "= data.find( 'a', class_='tile__resale-complex--link js_tile_complex_link' ).get_text( strip=True ) location = data.find('span', class_='tile__location').get_text(strip=True) complex", "try: plan_div = parking_data.find('div', {'id': 'plans_layout'}) plan_img = plan_div.find('img').get('src') except AttributeError: pass #", "self.base_url + urn parking_data = soup(self.session.get(parking_url).content, 'html.parser') # поиск номера парковочного места raw_number", "# парсинг площади try: raw_area = parking_div_data[0].get_text(strip=True).split()[0] area = float(raw_area.replace(',', '.')) except (AttributeError,", "ЖК :param data: bs4.BeautifulSoup :return: list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица", "поиска информации о квартире Поиск корпуса, секции, этажа и номера квартиры Возвращает словарь", "регион и ссылку на объект ЖК :param data: bs4.BeautifulSoup :return: list of tuples", "result['building'] = value elif title == 'секция': result['section'] = value elif title ==", "section = parking_div_data[2].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг этажа try: floor =", "return price_base except AttributeError: return None def _get_complex_item(self, data): \"\"\" Метод для поиска", "данными для корпуса, секции, этажа и номера квартиры output.update(self._get_complex_item(item)) # обновляем в словаре", "возвращать objects = [] raw_data = self.session.get(self.base_url_flats).content content = soup(raw_data, 'html.parser') # Поиск", "self.get_parking_data() data_result = flats + parking if json_file is None: return data_result else:", "open('ndv_ru.json', 'w') as file: json.dump(data_result, file) print('Success') def _get_new_buildings(self, url): \"\"\" Метод возвращает", "и регион output['complex'] = location # записываем данные о парковочном месте (площаь, корпус,", "?page=n к URL page_url = self.new_buildings_url + f'?page={i}' raw_data = self.session.get(page_url).content content =", "в исходный список objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content, location)) return objects def get_full_data(self, json_file=None):", "bs4.BeautifulSoup :return: list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output", "None def _get_phase(self, data): \"\"\" Метод для поиска очереди строительства :param data: bs4.element.Tag", "in range(1, pages+1): page_url = self.base_url_flats + f'?page={i}' raw_data = self.session.get(page_url).content content =", "def get_flats_data(self): \"\"\" Метод для получения данных о продаже квартир в новостройках Возвращает", "pages: for i in range(1, pages+1): page_url = url + f'?page={i}' raw_data =", "result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.')) if 'студия' in name.split()[0].lower(): result['rooms'] = 'studio' else:", "as file: json.dump(data_result, file) print('Success') def _get_new_buildings(self, url): \"\"\" Метод возвращает список кортежей", ":param data: bs4.element.Tag :return: str \"\"\" try: phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True) return phase", "except AttributeError: pass # поиск ссылки на план try: plan_div = parking_data.find('div', {'id':", "name = data.find('a', {'class': 'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.')) if 'студия' in", "if json_file is None: return data_result else: with open('ndv_ru.json', 'w') as file: json.dump(data_result,", "= parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг секции try: section = parking_div_data[2].get_text(strip=True)", "то возвращает номер последней страницы. 
:param data: bs4.BeautifulSoup :return: int last page number", "объявлений на странице raw_data = data.find_all('div', class_='tile__link js-tile-link') # в цикле проходим по", "in info: title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True) if title == 'корпус': result['building']", "ссылки на план try: plan_div = parking_data.find('div', {'id': 'plans_layout'}) plan_img = plan_div.find('img').get('src') except", "output = self.parser_dict.copy() # записываем имя ЖК и регион output['complex'] = location #", "улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" objects = [] raw_data = self.session.get(url).content content = soup(raw_data, 'html.parser')", "в новостройках + данные о парковочных местах Записывает полученные данные в json файл", "парсинг корпуса try: building = parking_div_data[1].get_text(strip=True) except (AttributeError, IndexError): pass # парсинг секции", "# в цикле проходим по каждому объявлению for item in raw_data: # Бремем", "DICT_KEYS = ['complex', 'type', 'phase', 'building', 'section', 'price_base', 'price_finished', 'price_sale', 'price_finished_sale', 'area', 'number',", "= self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_flats_data(content)) else:", "bs4.BeautifulSoup :param location: str :return: list of dicts \"\"\" result = [] #", "data, location): \"\"\" Метод для записи данных о отдельном парковочном месте На вход", "_get_phase(self, data): \"\"\" Метод для поиска очереди строительства :param data: bs4.element.Tag :return: str", "парковочного места raw_number = parking_data.find('meta', {'content': '10'}) if raw_number: number = raw_number.previous.strip().split()[1].replace('№', '')", "data: bs4.BeautifulSoup :return: list of dict \"\"\" result = [] # Поиск отдельных", "парсинг секции try: section = parking_div_data[2].get_text(strip=True) 
except (AttributeError, IndexError): pass # парсинг этажа", "price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) except AttributeError: try: price_base", "'price_sale': price_sale, 'price_base': price_base, 'type': 'parking', 'plan': plan_img, 'section': section, 'floor': floor }", "bs4.element.Tag :return: dict \"\"\" result = dict() name = data.find('a', {'class': 'tile__name'}).get_text(strip=True) result['area']", "if json_file=None :return: json_file - if json_file=True \"\"\" print('Starting data parsing...') flats =", "объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :param location: str :return: list of dicts", "output_dict = { 'number': number, 'building': building, 'area': area, 'price_sale': price_sale, 'price_base': price_base,", "= [] raw_data = data.find_all('div', {'class': 'tile__content'}) for item in raw_data: name =", "его регион output['complex'] = self._get_complex(item) # записываем очередь строительства output['phase'] = self._get_phase(item) #", "for i in range(1, pages + 1): # добавляем ?page=n к URL page_url", "objects.extend(self._write_flats_data(content)) else: objects = self._write_flats_data(content) return objects def get_parking_data(self): \"\"\" Метод для получения", "parking_data.find('meta', {'content': '10'}) if raw_number: number = raw_number.previous.strip().split()[1].replace('№', '') else: try: number =", "# добавляем(объединяем) в исходный список objects.extend(self._get_objects(content)) else: objects = self._get_objects(content) return objects def", "= plan_div.find('img').get('src') except AttributeError: pass # поиск цены (в том числе со скидкой)", "def _write_flats_data(self, data): \"\"\" Метод для записи данных о отдельной квартире в словарь", "self.base_url = 'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki' 
self.parser_dict = dict.fromkeys(DICT_KEYS) self.objects_list", ":return: list of dicts \"\"\" # исходный список объектов который будем возвращать objects", "def _get_objects(self, data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. Ищет название", ").get_text( strip=True ) location = data.find('span', class_='tile__location').get_text(strip=True) complex += f'({location})' return complex except", "item.find('span', {'class': 'tile__location'}).get_text().strip() urn = item.select_one('a', {'class': 'tile__name'}).get('href') output.append((name + f'({location})', self.base_url +", "'/parking' answer = self.session.get(url) # проверка есть ли в продаже парковочне места. Если", "{ 'number': number, 'building': building, 'area': area, 'price_sale': price_sale, 'price_base': price_base, 'type': 'parking',", "= value elif title == 'номер': result['number'] = value return result def _get_dimentions(self,", ":return: list of dict \"\"\" result = [] # Поиск отдельных объектов объявлений", "parking_data = soup(self.session.get(parking_url).content, 'html.parser') # поиск номера парковочного места raw_number = parking_data.find('meta', {'content':", "location): \"\"\" Метод для записи данных о отдельном парковочном месте На вход принимает", "print('Starting data parsing...') flats = self.get_flats_data() parking = self.get_parking_data() data_result = flats +", "Метод для записи данных о отдельном парковочном месте На вход принимает объект класса", "классу tile__image. 
С помощью регулярного выражения забирает URL :param data: bs4.element.Tag :return: str", "'w') as file: json.dump(data_result, file) print('Success') def _get_new_buildings(self, url): \"\"\" Метод возвращает список", "data): \"\"\" Метод производит поиск кол-ва комнат в квартире, площади + определение типа", "item in raw_data: name = item.select_one('a', {'class': 'tile__name'}).text.strip() location = item.find('span', {'class': 'tile__location'}).get_text().strip()", "__init__(self): self.session = requests.Session() self.base_url = 'https://www.ndv.ru' self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats' self.new_buildings_url = 'https://www.ndv.ru/novostrojki'", "'discount_percent', 'discount', 'comment'] class NdvParser: def __init__(self): self.session = requests.Session() self.base_url = 'https://www.ndv.ru'", "if __name__ == '__main__': ndv = NdvParser() # Запускаем парсер на квартиры и", ":return: dict \"\"\" plan_img = None price_base = None price_sale = None building", "'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_parking_data(content, location)) else: objects.extend(self._write_parking_data(content, location)) return objects", "'этаж': result['floor'] = value elif title == 'номер': result['number'] = value return result", "\"\"\" Метод для поиска информации о квартире Поиск корпуса, секции, этажа и номера", "tuples [('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output = [] raw_data =", "с данными о парковочных местах :return: list of dicts \"\"\" objects = []", "else: result['type'] = 'flat' return result def _write_flats_data(self, data): \"\"\" Метод для записи", "bs4.BeautifulSoup. 
Ищет название жк, регион и ссылку на объект ЖК :param data: bs4.BeautifulSoup", "price_base = data.find('span', class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3])) return price_base except AttributeError: return None", "(AttributeError, IndexError): pass # парсинг секции try: section = parking_div_data[2].get_text(strip=True) except (AttributeError, IndexError):", "план объекта output['plan'] = self._get_image(item) # обновляем в словаре ключи с данными для", "0 предложений, берем следующий ЖК if not number: continue # Поиск паджинатора на", "и ссылку на объект ЖК :param data: bs4.BeautifulSoup :return: list of tuples [('Мкр.", "AttributeError: pass # парсинг данных о парковочном месте(метраж, копус, секцияб этаж) parking_div_info =", "try: number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError: pass # поиск ссылки на план", "bs4 import BeautifulSoup as soup DICT_KEYS = ['complex', 'type', 'phase', 'building', 'section', 'price_base',", "result def _write_flats_data(self, data): \"\"\" Метод для записи данных о отдельной квартире в", "dict \"\"\" result = dict() name = data.find('a', {'class': 'tile__name'}).get_text(strip=True) result['area'] = float(name.split()[-1].replace('м²',", "= int(''.join(price_base.split()[:3])) return price_base except AttributeError: return None def _get_complex_item(self, data): \"\"\" Метод", "обновляем в словаре ключи с данными для комнат в квартире, площади + типа", "item url += '/parking' answer = self.session.get(url) # проверка есть ли в продаже", "Добавляем к URL /parking location, url = item url += '/parking' answer =", "паджинатора на странице pages = self._find_pagination(content) if pages: for i in range(1, pages", "raw_data = self.session.get(page_url).content content = soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._write_flats_data(content))", "объект класса bs4.BeautifulSoup. 
Ищет название жк, регион и ссылку на объект ЖК :param", "self._get_phase(item) # записываем цену output['price_base'] = self._price_base(item) # записвыем ссылку на план объекта", "записываем очередь строительства output['phase'] = self._get_phase(item) # записываем цену output['price_base'] = self._price_base(item) #", "'')) price_sale = parking_data.find( 'span', class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace(' ', ''))", "\"\"\" result = [] # Поиск отдельных объектов объявлений на странице raw_data =", "= int(name.split('-')[0]) if 'апартамент' in name.lower(): result['type'] = 'apartment' else: result['type'] = 'flat'", "с ключами в который будем записывать данные output = self.parser_dict.copy() # записываем имя", "data: bs4.BeautifulSoup :return: list of tuples [('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\"", ":param data: bs4.BeautifulSoup :return: int last page number or False \"\"\" pages =", "item.select_one('.tile__in-complex-title').get_text(strip=True).lower() value = item.select_one('.tile__in-complex-value').get_text(strip=True) if title == 'корпус': result['building'] = value elif title", "flats + parking if json_file is None: return data_result else: with open('ndv_ru.json', 'w')", "очередь строительства output['phase'] = self._get_phase(item) # записываем цену output['price_base'] = self._price_base(item) # записвыем", "месте :param data: bs4.element.Tag :return: dict \"\"\" plan_img = None price_base = None", "result.append(output) return result def _write_parking_data(self, data, location): \"\"\" Метод для записи данных о", "continue raw_data = answer.content content = soup(raw_data, 'html.parser') # Поиск кнопки <<Показать n", "if 'студия' in name.split()[0].lower(): result['rooms'] = 'studio' else: result['rooms'] = int(name.split('-')[0]) if 'апартамент'", "url: str :return: list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\"", "== '/img/new-design/no-image.svg': return None return plan except AttributeError: return None def _get_complex(self, data):", "False \"\"\" pages = data.findAll('a', {'class': 'move-to-page'}) if pages: last_page = int(pages[-2].text) return", ":return: str \"\"\" try: phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True) return phase except AttributeError: return", "pages = self._find_pagination(content) if pages: for i in range(1, pages+1): page_url = self.base_url_flats", "'article', 'finishing_name', 'furniture', 'furniture_price', 'plan', 'feature', 'view', 'euro_planning', 'sale', 'discount_percent', 'discount', 'comment'] class", "title == 'секция': result['section'] = value elif title == 'этаж': result['floor'] = value", "range(1, pages+1): page_url = self.base_url_flats + f'?page={i}' raw_data = self.session.get(page_url).content content = soup(raw_data,", "сранице ЖК raw_data = data.find_all('a', class_='flats-table__row table-body--row') # в цикле проходим по каждому", "словарь в список который будем возвращать result.append(output) return result def _write_parking_data(self, data, location):", ":param data: bs4.element.Tag :return: str \"\"\" try: price_base = data.find('span', class_='tile__price').get_text(strip=True) price_base =", "ЖК raw_data = data.find_all('a', class_='flats-table__row table-body--row') # в цикле проходим по каждому парковочному", "этаж, план) output.update(self._get_parking_info(item)) # добавляем словарь в список который будем возвращать result.append(output) return", "value return result def _get_dimentions(self, data): \"\"\" Метод производит поиск кол-ва комнат в", "[] raw_data = data.find_all('div', {'class': 'tile__content'}) for item in raw_data: name = item.select_one('a',", "= self._get_complex(item) # записываем очередь строительства output['phase'] = self._get_phase(item) # записываем цену 
output['price_base']", "None area = None section = None floor = None number = None", "момент 0 предложений, берем следующий ЖК if not number: continue # Поиск паджинатора", "self._get_new_buildings(self.new_buildings_url) def get_flats_data(self): \"\"\" Метод для получения данных о продаже квартир в новостройках", "soup(raw_data, 'html.parser') # добавляем(объединяем) в исходный список objects.extend(self._get_objects(content)) else: objects = self._get_objects(content) return", "\"\"\" result = [] # Поиск отдельных объектов парковочных мест на сранице ЖК", "= ['complex', 'type', 'phase', 'building', 'section', 'price_base', 'price_finished', 'price_sale', 'price_finished_sale', 'area', 'number', 'number_on_site',", "return data_result else: with open('ndv_ru.json', 'w') as file: json.dump(data_result, file) print('Success') def _get_new_buildings(self,", "404: continue raw_data = answer.content content = soup(raw_data, 'html.parser') # Поиск кнопки <<Показать", "in range(1, pages + 1): # добавляем ?page=n к URL page_url = self.new_buildings_url", "bs4.BeautifulSoup :param data: bs4.BeautifulSoup :param location: str :return: list of dicts \"\"\" result", "list of tuples [('Мкр. 
«Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')] \"\"\" output = []", ":param data: bs4.element.Tag :return: dict \"\"\" plan_img = None price_base = None price_sale", "на сранице ЖК raw_data = data.find_all('a', class_='flats-table__row table-body--row') # в цикле проходим по", "парковочном месте(метраж, копус, секцияб этаж) parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data = parking_div_info.find_all('div',", "мест Возвращает список словарей с данными о парковочных местах :return: list of dicts", "class_='card__info-prices__old').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) price_sale = parking_data.find( 'span', class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True)", "data: bs4.BeautifulSoup :param location: str :return: list of dicts \"\"\" result = []", "номер последней страницы. :param data: bs4.BeautifulSoup :return: int last page number or False", "На вход принимает объект класса bs4.BeautifulSoup :param data: bs4.BeautifulSoup :param location: str :return:", "str :return: list of dicts \"\"\" result = [] # Поиск отдельных объектов", "try: price_base = data.find('span', class_='tile__price').get_text(strip=True) price_base = int(''.join(price_base.split()[:3])) return price_base except AttributeError: return", "objects = self._write_flats_data(content) return objects def get_parking_data(self): \"\"\" Метод для получения данных о", "'currency', 'ceil', 'article', 'finishing_name', 'furniture', 'furniture_price', 'plan', 'feature', 'view', 'euro_planning', 'sale', 'discount_percent', 'discount',", "parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings') parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number') # парсинг площади try:", "исходный список objects.extend(self._write_flats_data(content)) else: objects = 
self._write_flats_data(content) return objects def get_parking_data(self): \"\"\" Метод", "else: result['rooms'] = int(name.split('-')[0]) if 'апартамент' in name.lower(): result['type'] = 'apartment' else: result['type']", "выражения забирает URL :param data: bs4.element.Tag :return: str (image src url) \"\"\" try:", "добавляем(объединяем) в исходный список objects.extend(self._get_objects(content)) else: objects = self._get_objects(content) return objects def _get_objects(self,", "поиска имени ЖК и его региона :param data: bs4.element.Tag :return: str \"\"\" try:", "строительства :param data: bs4.element.Tag :return: str \"\"\" try: phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True) return", "информации о квартире Поиск корпуса, секции, этажа и номера квартиры Возвращает словарь с", "скидкой) try: price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True) price_base = int(price_base.split('руб.')[0].replace(' ', '')) except AttributeError:", "поиск ссылки на план try: plan_div = parking_data.find('div', {'id': 'plans_layout'}) plan_img = plan_div.find('img').get('src')", "# добавляем словарь в список который будем возвращать result.append(output) return result def _write_parking_data(self,", "price_sale = parking_data.find( 'span', class_='card__info-prices__price card__info-prices--red' ).get_text(strip=True) price_sale = int(price_sale.split('руб.')[0].replace(' ', '')) except", "objects def _get_objects(self, data): \"\"\" Функция принимает на вход объект класса bs4.BeautifulSoup. Ищет", "вход объект класса bs4.BeautifulSoup. 
Ищет название жк, регион и ссылку на объект ЖК", "number = raw_number.previous.strip().split()[1].replace('№', '') else: try: number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2] except AttributeError: pass", "int last page number or False \"\"\" pages = data.findAll('a', {'class': 'move-to-page'}) if", "жк, регион и ссылку на объект ЖК :param data: bs4.BeautifulSoup :return: list of", "объект класса bs4.BeautifulSoup. Производит поиск пагинатора. Если он есть то возвращает номер последней", "class_='title').get_text(strip=True).split()[2] except AttributeError: pass # поиск ссылки на план try: plan_div = parking_data.find('div',", "Метод для получения данных о продаже квартир в новостройках Возвращает список словарей с", "soup(self.session.get(parking_url).content, 'html.parser') # поиск номера парковочного места raw_number = parking_data.find('meta', {'content': '10'}) if", "_get_new_buildings(self, url): \"\"\" Метод возвращает список кортежей с именем ЖК и его URL", "\"\"\" Метод для записи данных о отдельной квартире в словарь На вход принимает" ]
[ "self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(str(card), \"5 of Diamonds\") def", "= \"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank = \"2\", suit = \"Hearts\"), Card(rank", "= \"2\", suit = \"Hearts\"), Card(rank = \"2\", suit = \"Hearts\") ) def", "[ five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts ] unsorted_cards.sort() self.assertEqual( unsorted_cards, [ two_of_spades, five_of_diamonds,", "\"Jack\", suit = \"Hearts\") self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(str(card),", "Card(\"5\", \"Diamonds\") self.assertEqual(str(card), \"5 of Diamonds\") def test_has_technical_representation(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(repr(card),", "= \"Hearts\") self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(str(card), \"5 of", "Card(rank = \"8\", suit = \"Hearts\") ace_of_clubs = Card(rank = \"Ace\", suit =", "def test_has_technical_representation(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS,", "test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank = \"Two\", suit = \"Hearts\") def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError):", "suit = \"Hearts\") self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(str(card), \"5", "\"Hearts\") eight_of_hearts = Card(rank = \"8\", suit = \"Hearts\") ace_of_clubs = Card(rank =", "import unittest from poker.card import Card class 
CardTest(unittest.TestCase): def test_has_rank(self): card = Card(rank", "Card(rank = \"5\", suit = \"Hearts\") eight_of_hearts = Card(rank = \"8\", suit =", "\"Hearts\"), Card(rank = \"2\", suit = \"Hearts\") ) def test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank", "= Card(rank = \"King\", suit = \"Spades\") evaluation = queen_of_spades < king_of_spades self.assertEqual(", "test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank = \"2\", suit = \"Dots\") def test_can_create_standard_52_cards(self): cards =", "test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank = \"Queen\", suit = \"Spades\") king_of_spades = Card(rank =", "= \"Diamonds\") five_of_hearts = Card(rank = \"5\", suit = \"Hearts\") eight_of_hearts = Card(rank", "test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(str(card), \"5 of Diamonds\") def test_has_technical_representation(self): card =", "def test_sorts_cards(self): two_of_spades = Card(rank = \"2\", suit = \"Spades\") five_of_diamonds = Card(rank", "suit = \"Clubs\") self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self): card = Card(rank = \"Jack\", suit", "\"Queen\", \"King\", \"Ace\" ) ) def test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank = \"Two\", suit", "= queen_of_spades < king_of_spades self.assertEqual( evaluation, True, \"The sort algorithm is not sorting", "Diamonds\") def test_has_technical_representation(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual(", "(\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, ( \"2\", \"3\", \"4\",", "queen_of_spades = Card(rank = \"Queen\", suit = \"Spades\") king_of_spades = Card(rank = \"King\",", 
"Card(rank = \"King\", suit = \"Spades\") evaluation = queen_of_spades < king_of_spades self.assertEqual( evaluation,", "test_knows_its_rank_index(self): card = Card(rank = \"Jack\", suit = \"Hearts\") self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self):", "cards = Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual( cards[0], Card(rank = \"2\", suit = \"Hearts\")", ") def test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank = \"Two\", suit = \"Hearts\") def test_card_only_allows_for_valid_suit(self):", "card = Card(rank = \"2\", suit = \"Clubs\") self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self): card", "self.assertEqual( cards[0], Card(rank = \"2\", suit = \"Hearts\") ) self.assertEqual( cards[-1], Card(rank =", "suit = \"Diamonds\") five_of_hearts = Card(rank = \"5\", suit = \"Hearts\") eight_of_hearts =", "= Card(rank = \"5\", suit = \"Diamonds\") five_of_hearts = Card(rank = \"5\", suit", "suit = \"Dots\") def test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual( cards[0], Card(rank", "sorting the lower card first\" ) def test_sorts_cards(self): two_of_spades = Card(rank = \"2\",", "Card(rank = \"Two\", suit = \"Hearts\") def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank = \"2\",", "\"5 of Diamonds\") def test_has_technical_representation(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def", "= \"Clubs\") unsorted_cards = [ five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts ] unsorted_cards.sort() self.assertEqual(", "Card(rank = \"Queen\", suit = \"Spades\") king_of_spades = Card(rank = \"King\", suit =", "import Card class CardTest(unittest.TestCase): def test_has_rank(self): card = Card(rank = \"Queen\", suit =", "def 
test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank = \"Two\", suit = \"Hearts\") def test_card_only_allows_for_valid_suit(self): with", "algorithm is not sorting the lower card first\" ) def test_sorts_cards(self): two_of_spades =", "\"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\" ) )", "\"10\", \"Jack\", \"Queen\", \"King\", \"Ace\" ) ) def test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank =", "king_of_spades = Card(rank = \"King\", suit = \"Spades\") evaluation = queen_of_spades < king_of_spades", "= Card(rank = \"2\", suit = \"Spades\") five_of_diamonds = Card(rank = \"5\", suit", "\"5\", suit = \"Hearts\") eight_of_hearts = Card(rank = \"8\", suit = \"Hearts\") ace_of_clubs", "= Card(\"5\", \"Diamonds\") self.assertEqual(str(card), \"5 of Diamonds\") def test_has_technical_representation(self): card = Card(\"5\", \"Diamonds\")", "\"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self):", "\"Queen\", suit = \"Spades\") king_of_spades = Card(rank = \"King\", suit = \"Spades\") evaluation", "= \"Ace\", suit = \"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank = \"2\", suit", "\"Spades\") five_of_diamonds = Card(rank = \"5\", suit = \"Diamonds\") five_of_hearts = Card(rank =", "Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, ( \"2\", \"3\",", "= \"Jack\", suit = \"Hearts\") self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\", \"Diamonds\")", "\"Ace\", suit = \"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank = \"2\", suit =", "= \"Ace\", 
suit = \"Clubs\") unsorted_cards = [ five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts", "\"2\", suit = \"Hearts\") ) def test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank = \"Queen\", suit", "suit = \"Hearts\") def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank = \"2\", suit = \"Dots\")", "\"King\", suit = \"Spades\") evaluation = queen_of_spades < king_of_spades self.assertEqual( evaluation, True, \"The", "self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\") ) def", "\"Two\", suit = \"Hearts\") def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank = \"2\", suit =", "= \"Hearts\"), Card(rank = \"2\", suit = \"Hearts\") ) def test_card_can_sort_itself_with_another_one(self): queen_of_spades =", "\"Dots\") def test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual( cards[0], Card(rank = \"2\",", "= \"5\", suit = \"Hearts\") eight_of_hearts = Card(rank = \"8\", suit = \"Hearts\")", "self.assertRaises(ValueError): Card(rank = \"2\", suit = \"Dots\") def test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards() self.assertEqual(len(cards),", "unittest from poker.card import Card class CardTest(unittest.TestCase): def test_has_rank(self): card = Card(rank =", "five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts ] unsorted_cards.sort() self.assertEqual( unsorted_cards, [ two_of_spades, five_of_diamonds, five_of_hearts,", "= \"Queen\", suit = \"Hearts\") self.assertEqual(card.rank, \"Queen\") def test_has_suit(self): card = Card(rank =", "Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual( cards[0], Card(rank = \"2\", suit = \"Hearts\") ) 
self.assertEqual(", "suit = \"Hearts\") ace_of_clubs = Card(rank = \"Ace\", suit = \"Clubs\") unsorted_cards =", "Card(rank = \"Queen\", suit = \"Hearts\") self.assertEqual(card.rank, \"Queen\") def test_has_suit(self): card = Card(rank", "\"Hearts\") ) def test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank = \"Queen\", suit = \"Spades\") king_of_spades", "= Card(\"5\", \"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\",", "\"Ace\", suit = \"Clubs\") unsorted_cards = [ five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts ]", "of Diamonds\") def test_has_technical_representation(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self):", "\"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\" ) ) def test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank", "= \"Dots\") def test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual( cards[0], Card(rank =", "self.assertEqual(len(cards), 52) self.assertEqual( cards[0], Card(rank = \"2\", suit = \"Hearts\") ) self.assertEqual( cards[-1],", "self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self): card = Card(rank = \"Jack\", suit = \"Hearts\") self.assertEqual(card.rank_index,", "\"Queen\", suit = \"Hearts\") self.assertEqual(card.rank, \"Queen\") def test_has_suit(self): card = Card(rank = \"2\",", "\"Ace\" ) ) def test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank = \"Two\", suit = \"Hearts\")", "\"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, ( \"2\", \"3\", \"4\", \"5\", \"6\", \"7\",", "= \"2\", suit = \"Dots\") def 
test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual(", "9) def test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(str(card), \"5 of Diamonds\") def test_has_technical_representation(self):", "\"Spades\") evaluation = queen_of_spades < king_of_spades self.assertEqual( evaluation, True, \"The sort algorithm is", "king_of_spades self.assertEqual( evaluation, True, \"The sort algorithm is not sorting the lower card", "\"Diamonds\") five_of_hearts = Card(rank = \"5\", suit = \"Hearts\") eight_of_hearts = Card(rank =", "five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts ] unsorted_cards.sort() self.assertEqual( unsorted_cards, [ two_of_spades, five_of_diamonds, five_of_hearts, eight_of_hearts,", "suit = \"Spades\") evaluation = queen_of_spades < king_of_spades self.assertEqual( evaluation, True, \"The sort", "class CardTest(unittest.TestCase): def test_has_rank(self): card = Card(rank = \"Queen\", suit = \"Hearts\") self.assertEqual(card.rank,", "\"2\", suit = \"Hearts\") ) self.assertEqual( cards[-1], Card(rank = \"Ace\", suit = \"Diamonds\")", "= \"Hearts\") ) self.assertEqual( cards[-1], Card(rank = \"Ace\", suit = \"Diamonds\") ) def", "= \"2\", suit = \"Hearts\") ) def test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank = \"Queen\",", "card = Card(rank = \"Jack\", suit = \"Hearts\") self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self): card", "test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual( cards[0], Card(rank = \"2\", suit =", "= \"Spades\") evaluation = queen_of_spades < king_of_spades self.assertEqual( evaluation, True, \"The sort algorithm", "= \"Hearts\") eight_of_hearts = Card(rank = \"8\", suit = \"Hearts\") ace_of_clubs = Card(rank", "\"7\", \"8\", \"9\", \"10\", 
\"Jack\", \"Queen\", \"King\", \"Ace\" ) ) def test_card_only_allows_for_valid_rank(self): with", "= \"2\", suit = \"Clubs\") self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self): card = Card(rank =", "= Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual( cards[0], Card(rank = \"2\", suit = \"Hearts\") )", "card first\" ) def test_sorts_cards(self): two_of_spades = Card(rank = \"2\", suit = \"Spades\")", "first\" ) def test_sorts_cards(self): two_of_spades = Card(rank = \"2\", suit = \"Spades\") five_of_diamonds", "\"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\" ) ) def test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError):", "= \"Hearts\") ace_of_clubs = Card(rank = \"Ace\", suit = \"Clubs\") unsorted_cards = [", "= \"Two\", suit = \"Hearts\") def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank = \"2\", suit", "def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank = \"2\", suit = \"Hearts\"), Card(rank = \"2\", suit", "def test_has_suit(self): card = Card(rank = \"2\", suit = \"Clubs\") self.assertEqual(card.suit, \"Clubs\") def", "card = Card(rank = \"Queen\", suit = \"Hearts\") self.assertEqual(card.rank, \"Queen\") def test_has_suit(self): card", "\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\"", "suit = \"Spades\") king_of_spades = Card(rank = \"King\", suit = \"Spades\") evaluation =", "True, \"The sort algorithm is not sorting the lower card first\" ) def", "\"The sort algorithm is not sorting the lower card first\" ) def test_sorts_cards(self):", "Card(rank = \"5\", suit = \"Diamonds\") five_of_hearts = Card(rank = \"5\", suit =", "self.assertEqual( evaluation, True, \"The sort algorithm is not sorting the lower card first\"", "test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, ( \"2\", \"3\", \"4\", \"5\", \"6\", 
\"7\", \"8\", \"9\", \"10\",", "Card(rank = \"Ace\", suit = \"Clubs\") unsorted_cards = [ five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs,", "= [ five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts ] unsorted_cards.sort() self.assertEqual( unsorted_cards, [ two_of_spades,", "def test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(str(card), \"5 of Diamonds\") def test_has_technical_representation(self): card", "\"Spades\", \"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, ( \"2\", \"3\", \"4\", \"5\", \"6\",", "\"5\", suit = \"Diamonds\") five_of_hearts = Card(rank = \"5\", suit = \"Hearts\") eight_of_hearts", "cards[0], Card(rank = \"2\", suit = \"Hearts\") ) self.assertEqual( cards[-1], Card(rank = \"Ace\",", "ace_of_clubs, eight_of_hearts ] unsorted_cards.sort() self.assertEqual( unsorted_cards, [ two_of_spades, five_of_diamonds, five_of_hearts, eight_of_hearts, ace_of_clubs ]", "self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, ( \"2\",", "suit = \"Clubs\") unsorted_cards = [ five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts ] unsorted_cards.sort()", "\"Spades\") king_of_spades = Card(rank = \"King\", suit = \"Spades\") evaluation = queen_of_spades <", "= Card(rank = \"Queen\", suit = \"Hearts\") self.assertEqual(card.rank, \"Queen\") def test_has_suit(self): card =", "self.assertEqual( Card.RANKS, ( \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\",", "\"Hearts\") def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank = \"2\", suit = \"Dots\") def test_can_create_standard_52_cards(self):", "\"Queen\") def test_has_suit(self): card = Card(rank = \"2\", suit = \"Clubs\") self.assertEqual(card.suit, \"Clubs\")", 
"test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank = \"2\", suit = \"Hearts\"), Card(rank = \"2\", suit =", "\"King\", \"Ace\" ) ) def test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank = \"Two\", suit =", "Card(rank = \"Ace\", suit = \"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank = \"2\",", "test_has_rank(self): card = Card(rank = \"Queen\", suit = \"Hearts\") self.assertEqual(card.rank, \"Queen\") def test_has_suit(self):", "def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, ( \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\",", "queen_of_spades < king_of_spades self.assertEqual( evaluation, True, \"The sort algorithm is not sorting the", "unsorted_cards = [ five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts ] unsorted_cards.sort() self.assertEqual( unsorted_cards, [", "= \"Spades\") five_of_diamonds = Card(rank = \"5\", suit = \"Diamonds\") five_of_hearts = Card(rank", "\"8\", suit = \"Hearts\") ace_of_clubs = Card(rank = \"Ace\", suit = \"Clubs\") unsorted_cards", "= \"Hearts\") ) def test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank = \"Queen\", suit = \"Spades\")", "CardTest(unittest.TestCase): def test_has_rank(self): card = Card(rank = \"Queen\", suit = \"Hearts\") self.assertEqual(card.rank, \"Queen\")", "is not sorting the lower card first\" ) def test_sorts_cards(self): two_of_spades = Card(rank", "two_of_spades, ace_of_clubs, eight_of_hearts ] unsorted_cards.sort() self.assertEqual( unsorted_cards, [ two_of_spades, five_of_diamonds, five_of_hearts, eight_of_hearts, ace_of_clubs", "sort algorithm is not sorting the lower card first\" ) def test_sorts_cards(self): two_of_spades", "\"Hearts\") self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(str(card), 
\"5 of Diamonds\")", "\"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank = \"2\", suit = \"Hearts\"), Card(rank =", "\"Hearts\") ace_of_clubs = Card(rank = \"Ace\", suit = \"Clubs\") unsorted_cards = [ five_of_hearts,", "test_has_suit(self): card = Card(rank = \"2\", suit = \"Clubs\") self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self):", "\"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\" ) ) def test_card_only_allows_for_valid_rank(self):", "def test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual( cards[0], Card(rank = \"2\", suit", "test_sorts_cards(self): two_of_spades = Card(rank = \"2\", suit = \"Spades\") five_of_diamonds = Card(rank =", "evaluation = queen_of_spades < king_of_spades self.assertEqual( evaluation, True, \"The sort algorithm is not", "card = Card(\"5\", \"Diamonds\") self.assertEqual(str(card), \"5 of Diamonds\") def test_has_technical_representation(self): card = Card(\"5\",", "self.assertEqual(card.rank, \"Queen\") def test_has_suit(self): card = Card(rank = \"2\", suit = \"Clubs\") self.assertEqual(card.suit,", "= \"2\", suit = \"Spades\") five_of_diamonds = Card(rank = \"5\", suit = \"Diamonds\")", "cards[-1], Card(rank = \"Ace\", suit = \"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank =", ") def test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank = \"Queen\", suit = \"Spades\") king_of_spades =", "\"Hearts\") ) self.assertEqual( cards[-1], Card(rank = \"Ace\", suit = \"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self):", "\"2\", suit = \"Hearts\"), Card(rank = \"2\", suit = \"Hearts\") ) def test_card_can_sort_itself_with_another_one(self):", "def test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank = \"Queen\", suit = \"Spades\") king_of_spades = Card(rank", 
"self.assertRaises(ValueError): Card(rank = \"Two\", suit = \"Hearts\") def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank =", "suit = \"Hearts\") ) self.assertEqual( cards[-1], Card(rank = \"Ace\", suit = \"Diamonds\") )", "= \"8\", suit = \"Hearts\") ace_of_clubs = Card(rank = \"Ace\", suit = \"Clubs\")", "= Card(rank = \"Ace\", suit = \"Clubs\") unsorted_cards = [ five_of_hearts, five_of_diamonds, two_of_spades,", "test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, (", ") def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, ( \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\",", "with self.assertRaises(ValueError): Card(rank = \"2\", suit = \"Dots\") def test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards()", ") def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank = \"2\", suit = \"Hearts\"), Card(rank = \"2\",", "from poker.card import Card class CardTest(unittest.TestCase): def test_has_rank(self): card = Card(rank = \"Queen\",", "Card(rank = \"2\", suit = \"Hearts\") ) def test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank =", "def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS,", "def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank = \"2\", suit = \"Dots\") def test_can_create_standard_52_cards(self): cards", "Card(rank = \"2\", suit = \"Clubs\") self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self): card = Card(rank", "def test_has_rank(self): card = Card(rank = \"Queen\", suit = \"Hearts\") self.assertEqual(card.rank, 
\"Queen\") def", "= \"Queen\", suit = \"Spades\") king_of_spades = Card(rank = \"King\", suit = \"Spades\")", "= \"Spades\") king_of_spades = Card(rank = \"King\", suit = \"Spades\") evaluation = queen_of_spades", "Card(rank = \"Jack\", suit = \"Hearts\") self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self): card = Card(\"5\",", "'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self): self.assertEqual(", "self.assertEqual( Card(rank = \"2\", suit = \"Hearts\"), Card(rank = \"2\", suit = \"Hearts\")", "two_of_spades = Card(rank = \"2\", suit = \"Spades\") five_of_diamonds = Card(rank = \"5\",", "suit = \"Hearts\") self.assertEqual(card.rank, \"Queen\") def test_has_suit(self): card = Card(rank = \"2\", suit", "def test_knows_its_rank_index(self): card = Card(rank = \"Jack\", suit = \"Hearts\") self.assertEqual(card.rank_index, 9) def", "\"Clubs\", \"Spades\", \"Diamonds\") ) def test_card_has_thirteen_possible_rank_options(self): self.assertEqual( Card.RANKS, ( \"2\", \"3\", \"4\", \"5\",", "\"Clubs\") unsorted_cards = [ five_of_hearts, five_of_diamonds, two_of_spades, ace_of_clubs, eight_of_hearts ] unsorted_cards.sort() self.assertEqual( unsorted_cards,", "\"Clubs\") self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self): card = Card(rank = \"Jack\", suit = \"Hearts\")", "test_has_technical_representation(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\",", "= \"Clubs\") self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self): card = Card(rank = \"Jack\", suit =", "\"Diamonds\") self.assertEqual(str(card), \"5 of Diamonds\") def test_has_technical_representation(self): card = Card(\"5\", 
\"Diamonds\") self.assertEqual(repr(card), \"Card('5',", "the lower card first\" ) def test_sorts_cards(self): two_of_spades = Card(rank = \"2\", suit", "= Card(rank = \"5\", suit = \"Hearts\") eight_of_hearts = Card(rank = \"8\", suit", "\"Jack\", \"Queen\", \"King\", \"Ace\" ) ) def test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank = \"Two\",", "self.assertEqual(str(card), \"5 of Diamonds\") def test_has_technical_representation(self): card = Card(\"5\", \"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\")", "Card(rank = \"2\", suit = \"Hearts\") ) self.assertEqual( cards[-1], Card(rank = \"Ace\", suit", "( \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\",", "suit = \"Spades\") five_of_diamonds = Card(rank = \"5\", suit = \"Diamonds\") five_of_hearts =", ") ) def test_card_only_allows_for_valid_rank(self): with self.assertRaises(ValueError): Card(rank = \"Two\", suit = \"Hearts\") def", "lower card first\" ) def test_sorts_cards(self): two_of_spades = Card(rank = \"2\", suit =", ") def test_sorts_cards(self): two_of_spades = Card(rank = \"2\", suit = \"Spades\") five_of_diamonds =", "eight_of_hearts = Card(rank = \"8\", suit = \"Hearts\") ace_of_clubs = Card(rank = \"Ace\",", "Card class CardTest(unittest.TestCase): def test_has_rank(self): card = Card(rank = \"Queen\", suit = \"Hearts\")", "= Card(rank = \"8\", suit = \"Hearts\") ace_of_clubs = Card(rank = \"Ace\", suit", "suit = \"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank = \"2\", suit = \"Hearts\"),", "Card.RANKS, ( \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\",", "= \"King\", suit = \"Spades\") evaluation = queen_of_spades < king_of_spades self.assertEqual( evaluation, True,", "ace_of_clubs = Card(rank = \"Ace\", suit = \"Clubs\") unsorted_cards = [ five_of_hearts, five_of_diamonds,", "= Card(rank = \"2\", suit = 
\"Clubs\") self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self): card =", "\"2\", suit = \"Clubs\") self.assertEqual(card.suit, \"Clubs\") def test_knows_its_rank_index(self): card = Card(rank = \"Jack\",", "self.assertEqual( cards[-1], Card(rank = \"Ace\", suit = \"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self): self.assertEqual( Card(rank", "suit = \"Hearts\") ) def test_card_can_sort_itself_with_another_one(self): queen_of_spades = Card(rank = \"Queen\", suit =", "Card(rank = \"2\", suit = \"Spades\") five_of_diamonds = Card(rank = \"5\", suit =", "poker.card import Card class CardTest(unittest.TestCase): def test_has_rank(self): card = Card(rank = \"Queen\", suit", "= \"2\", suit = \"Hearts\") ) self.assertEqual( cards[-1], Card(rank = \"Ace\", suit =", "\"2\", suit = \"Dots\") def test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards() self.assertEqual(len(cards), 52) self.assertEqual( cards[0],", "five_of_diamonds = Card(rank = \"5\", suit = \"Diamonds\") five_of_hearts = Card(rank = \"5\",", "= \"5\", suit = \"Diamonds\") five_of_hearts = Card(rank = \"5\", suit = \"Hearts\")", "\"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\") )", "five_of_hearts = Card(rank = \"5\", suit = \"Hearts\") eight_of_hearts = Card(rank = \"8\",", "card = Card(\"5\", \"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\",", "Card(\"5\", \"Diamonds\") self.assertEqual(repr(card), \"Card('5', 'Diamonds')\") def test_card_has_four_possible_suit_options(self): self.assertEqual( Card.SUITS, (\"Hearts\", \"Clubs\", \"Spades\", \"Diamonds\")", "\"Hearts\") self.assertEqual(card.rank, \"Queen\") def test_has_suit(self): card = Card(rank = \"2\", suit = 
\"Clubs\")", "52) self.assertEqual( cards[0], Card(rank = \"2\", suit = \"Hearts\") ) self.assertEqual( cards[-1], Card(rank", "= Card(rank = \"Jack\", suit = \"Hearts\") self.assertEqual(card.rank_index, 9) def test_has_string_representation_with_rank_and_suit(self): card =", "evaluation, True, \"The sort algorithm is not sorting the lower card first\" )", "eight_of_hearts ] unsorted_cards.sort() self.assertEqual( unsorted_cards, [ two_of_spades, five_of_diamonds, five_of_hearts, eight_of_hearts, ace_of_clubs ] )", "\"2\", suit = \"Spades\") five_of_diamonds = Card(rank = \"5\", suit = \"Diamonds\") five_of_hearts", "suit = \"Hearts\"), Card(rank = \"2\", suit = \"Hearts\") ) def test_card_can_sort_itself_with_another_one(self): queen_of_spades", "not sorting the lower card first\" ) def test_sorts_cards(self): two_of_spades = Card(rank =", "= \"Hearts\") def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank = \"2\", suit = \"Dots\") def", "with self.assertRaises(ValueError): Card(rank = \"Two\", suit = \"Hearts\") def test_card_only_allows_for_valid_suit(self): with self.assertRaises(ValueError): Card(rank", ") self.assertEqual( cards[-1], Card(rank = \"Ace\", suit = \"Diamonds\") ) def test_figures_out_if_two_cards_are_equal(self): self.assertEqual(", "Card(rank = \"2\", suit = \"Dots\") def test_can_create_standard_52_cards(self): cards = Card.create_standard_52_cards() self.assertEqual(len(cards), 52)", "\"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\" ) ) def", "Card(rank = \"2\", suit = \"Hearts\"), Card(rank = \"2\", suit = \"Hearts\") )", "suit = \"Hearts\") eight_of_hearts = Card(rank = \"8\", suit = \"Hearts\") ace_of_clubs =", "< king_of_spades self.assertEqual( evaluation, True, \"The sort algorithm is not sorting the lower", "= \"Hearts\") self.assertEqual(card.rank, \"Queen\") def test_has_suit(self): card = Card(rank = \"2\", suit =", "= Card(rank = \"Queen\", suit = 
\"Spades\") king_of_spades = Card(rank = \"King\", suit", "\"Clubs\") def test_knows_its_rank_index(self): card = Card(rank = \"Jack\", suit = \"Hearts\") self.assertEqual(card.rank_index, 9)", "\"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\" )" ]
[ "view.called is True assert view.call_args == data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self,", "assert isinstance(query, Query) def test_get_query_w_override(self): view = UserOverrideView() view.request = self.request query =", "User) assert instance.id == 1 assert instance.name == 'testing' def test_get_object_not_found(self): view =", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_list_api_view_get(self):", "assert view.called is True assert view.call_args == data def test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_put(self): class", "assert view.called is True assert view.call_args == data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def", "def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args, **kwargs): self.partial_called = True self.partial_call_args", "class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.r_called = True self.r_call_args = (request,", "class MockListApiView(generics.ListAPIView): def list(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "view.call_args == data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs): self.called", "test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args, **kwargs): self.partial_called = True self.partial_call_args =", "**kwargs): self.partial_called = True self.partial_call_args = 
(request, args, kwargs) def update(self, request, *args,", "view.partial_call_args == data view.put('test request', 'test arg', test_kwarg='test') assert view.partial_called is True assert", "= MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.put('test request', 'test", "arg',), {'test_kwarg': 'test'}) view.post('test request', 'test arg', test_kwarg='test') assert view.called is True assert", "= MockRetrieveApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test", "<filename>tests/test_generics.py from unittest import TestCase, mock from pyramid import testing from pyramid.httpexceptions import", "view = MockListApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request',", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_destroy_api_view(self):", "query = view.get_query() assert isinstance(query, Query) def test_missing_model(self): view = generics.GenericAPIView() view.request =", "sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.query import Query from marshmallow import Schema, fields from", "data def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.called = True", "== self.request def test_filter_query(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing'} view.request =", "is True assert view.partial_call_args == data view.put('test request', 'test arg', test_kwarg='test') assert view.partial_called", "self.assertRaises(AssertionError, view.get_query) def test_get_object(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id':", "self.request query = view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count == 1 def test_no_paginator(self): view 
=", "destroy(self, request, *args, **kwargs): self.d_called = True self.d_call_args = (request, args, kwargs) def", "== 1 assert instance.name == 'testing' def test_get_object_override(self): view = UserOverrideView() view.request =", "kwargs) def destroy(self, request, *args, **kwargs): self.d_called = True self.d_call_args = (request, args,", "request', ('test arg',), {'test_kwarg': 'test'}) view.put('test request', 'test arg', test_kwarg='test') assert view.called is", "def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args", "Query from marshmallow import Schema, fields from pyramid_restful import generics from pyramid_restful.filters import", "import testing from pyramid.httpexceptions import HTTPNotFound from sqlalchemy import create_engine, Column, String, Integer", "test_missing_model(self): view = generics.GenericAPIView() view.request = self.request self.assertRaises(AssertionError, view.get_query) def test_get_object(self): view =", "data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test')", "= True self.call_args = (request, args, kwargs) view = MockUpdateApiView() data = ('test", "(User, 'id') def get_query(self): return self.request.dbsession.query(self.model) def get_schema_class(self, *args, **kwargs): return UserSchema def", "class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args, **kwargs): self.partial_called = True self.partial_call_args = (request,", "(request, args, kwargs) view = MockRetrieveUpdateDestroyAPIView() data = ('test request', ('test arg',), {'test_kwarg':", "arg', test_kwarg='test') view.patch('test request', 'test arg', test_kwarg='test') assert view.r_called is True assert view.r_call_args", "request', 'test arg', test_kwarg='test') assert view.list_called is True assert view.list_call_args == data view.post('test", "= 
User(id=1, name='testing') user2 = User(id=2, name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def", "request', 'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data view.put('test", "def tearDownClass(cls): cls.dbsession.close() def setUp(self): self.request = testing.DummyRequest() self.request.dbsession = self.dbsession def test_get_query_w_model(self):", "test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):", "= self.request view.lookup_url_kwargs = {'id': 3} self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self): view = UserAPIView()", "return self.request.dbsession.query(self.model) def get_schema_class(self, *args, **kwargs): return UserSchema def get_dbsession(): Session = sessionmaker()", "view.patch('test request', 'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data", "view = UserOverrideView() view.request = self.request query = view.get_query() assert view.paginate_query(query) == None", "test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args =", "test_no_paginator(self): view = UserOverrideView() view.request = self.request query = view.get_query() assert view.paginate_query(query) ==", "self.list_call_args = (request, args, kwargs) def create(self, request, *args, **kwargs): self.called = True", "view.call_args == data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request, *args, **kwargs): self.called", "get_schema_class(self, *args, **kwargs): return UserSchema def get_dbsession(): Session = sessionmaker() Session.configure(bind=engine) 
return Session()", "is True assert view.r_call_args == data assert view.d_called is True assert view.d_call_args ==", "1} instance = view.get_object() assert isinstance(instance, User) assert instance.id == 1 assert instance.name", "= self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count == 1 class ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView):", "args, kwargs) view = MockDestroyApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'})", "MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args, **kwargs): self.partial_called = True self.partial_call_args = (request, args,", "UserAPIView() self.request.params = {'filter[name]': 'testing'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results)", "= MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test", "= (User.name,) class UserOverrideView(generics.GenericAPIView): model = User lookup_column = (User, 'id') def get_query(self):", "declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String)", "name = fields.String() class UserAPIView(generics.GenericAPIView): model = User schema_class = UserSchema pagination_class =", "sqlalchemy import create_engine, Column, String, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import", "= ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert", "def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True self.call_args", "**kwargs): self.p_called = True self.p_call_args = (request, args, kwargs) view = MockRetrieveUpdateDestroyAPIView() data", 
"schema.context['request'] == self.request def test_filter_query(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing'} view.request", "= self.request query = view.get_query() assert view.paginate_query(query) == None def test_get_paginated_response(self): view =", "'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args ==", "('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.called is True", "view.put('test request', 'test arg', test_kwarg='test') view.patch('test request', 'test arg', test_kwarg='test') assert view.r_called is", "MockUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg',", "view.called is True assert view.call_args == data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self,", "request', 'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data def", "(FieldFilter,) filter_fields = (User.name,) class UserOverrideView(generics.GenericAPIView): model = User lookup_column = (User, 'id')", "{'id': 3} self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self): view = UserAPIView() view.request = self.request schema", "MockListCreateApiView(generics.ListCreateAPIView): def list(self, request, *args, **kwargs): self.list_called = True self.list_call_args = (request, args,", "pyramid_restful.filters import FieldFilter engine = create_engine('sqlite://') Base = declarative_base() class User(Base): __tablename__ =", "class GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession = get_dbsession() user = User(id=1, name='testing')", "view.call_args == data def test_retrieve_update_destroy_api_view(self): class 
MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.r_called", "**kwargs): return UserSchema def get_dbsession(): Session = sessionmaker() Session.configure(bind=engine) return Session() class GenericAPIViewTests(TestCase):", "self.request schema = view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_override_get_schema(self):", "name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def tearDownClass(cls): cls.dbsession.close() def setUp(self): self.request =", "== 'testing' def test_get_object_override(self): view = UserOverrideView() view.request = self.request view.lookup_url_kwargs = {'id':", "= ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test arg', test_kwarg='test') assert", "MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg',", "= view.get_query() assert isinstance(query, Query) def test_missing_model(self): view = generics.GenericAPIView() view.request = self.request", "1 assert instance.name == 'testing' def test_get_object_override(self): view = UserOverrideView() view.request = self.request", "**kwargs): self.list_called = True self.list_call_args = (request, args, kwargs) def create(self, request, *args,", "{'id': 1} instance = view.get_object() assert isinstance(instance, User) assert instance.id == 1 assert", "def test_missing_model(self): view = generics.GenericAPIView() view.request = self.request self.assertRaises(AssertionError, view.get_query) def test_get_object(self): view", "assert view.call_args == data def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def list(self, request, *args, **kwargs):", "= self.request view.lookup_url_kwargs = {'id': 1} instance = view.get_object() assert isinstance(instance, User) assert", 
"import declarative_base from sqlalchemy.orm.query import Query from marshmallow import Schema, fields from pyramid_restful", "test_kwarg='test') assert view.list_called is True assert view.list_call_args == data view.post('test request', 'test arg',", "User) assert instance.id == 1 assert instance.name == 'testing' def test_get_object_override(self): view =", "= UserAPIView() self.request.params = {'filter[name]': 'testing'} view.request = self.request results = view.filter_query(view.get_query()).all() assert", "self.p_called = True self.p_call_args = (request, args, kwargs) view = MockRetrieveUpdateDestroyAPIView() data =", "view.r_called is True assert view.r_call_args == data assert view.d_called is True assert view.d_call_args", "from marshmallow import Schema, fields from pyramid_restful import generics from pyramid_restful.filters import FieldFilter", "fields from pyramid_restful import generics from pyramid_restful.filters import FieldFilter engine = create_engine('sqlite://') Base", "= Column(String) class UserSchema(Schema): id = fields.Integer() name = fields.String() class UserAPIView(generics.GenericAPIView): model", "True assert view.call_args == data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, *args,", "def test_get_object(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance", "self.call_args = (request, args, kwargs) view = MockListCreateApiView() data = ('test request', ('test", "view = MockRetrieveApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request',", "import TestCase, mock from pyramid import testing from pyramid.httpexceptions import HTTPNotFound from sqlalchemy", "is True assert view.d_call_args == data assert view.u_called is True assert view.u_call_args ==", "kwargs) def update(self, request, *args, **kwargs): self.u_called = True self.u_call_args = (request, args,", 
"partial_update(self, request, *args, **kwargs): self.partial_called = True self.partial_call_args = (request, args, kwargs) def", "= self.request query = view.get_query() assert isinstance(query, Query) def test_get_query_w_override(self): view = UserOverrideView()", "MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "= True self.d_call_args = (request, args, kwargs) def update(self, request, *args, **kwargs): self.u_called", "request, *args, **kwargs): self.p_called = True self.p_call_args = (request, args, kwargs) view =", "True self.r_call_args = (request, args, kwargs) def destroy(self, request, *args, **kwargs): self.d_called =", "view = UserAPIView() self.request.params = {'filter[name]': 'testing'} view.request = self.request results = view.filter_query(view.get_query()).all()", "== data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs): self.called =", "**kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockCreateApiView() data", "generics.GenericAPIView() view.request = self.request self.assertRaises(AssertionError, view.get_query) def test_get_object(self): view = UserAPIView() view.request =", "True assert view.call_args == data def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def list(self, request, *args,", "def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args", "view = MockDestroyApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request',", "self.called = True self.call_args = (request, args, kwargs) view = MockListApiView() data =", "MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def 
partial_update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "get_query(self): return self.request.dbsession.query(self.model) def get_schema_class(self, *args, **kwargs): return UserSchema def get_dbsession(): Session =", "assert view.call_args == data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args, **kwargs):", "True self.call_args = (request, args, kwargs) view = MockListCreateApiView() data = ('test request',", "view.filter_query(view.get_query()).all() assert len(results) == 0 def test_paginate_query(self): view = UserAPIView() view.request = self.request", "def test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def create(self, request, *args, **kwargs): self.called = True self.call_args", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_update_api_view_partial_update(self):", "True assert view.call_args == data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args,", "view.patch('test request', 'test arg', test_kwarg='test') assert view.r_called is True assert view.r_call_args == data", "UserOverrideView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance = view.get_object() assert isinstance(instance,", "self.call_args = (request, args, kwargs) view = MockCreateApiView() data = ('test request', ('test", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_put(self):", "def destroy(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs)", "**kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockUpdateApiView() data", "from unittest import TestCase, mock from pyramid import testing from 
pyramid.httpexceptions import HTTPNotFound", "view.paginator.paginate_query.call_count == 1 def test_no_paginator(self): view = UserOverrideView() view.request = self.request query =", "(request, args, kwargs) view = MockUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg':", "MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.put('test request', 'test arg',", "data def test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.r_called = True", "3} self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self): view = UserAPIView() view.request = self.request schema =", "def test_override_get_schema(self): view = UserOverrideView() view.request = self.request schema = view.get_schema() assert isinstance(schema,", "= True self.call_args = (request, args, kwargs) view = MockRetrieveDestroyUApiView() data = ('test", "(request, args, kwargs) view = MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg':", "'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data view.put('test request',", "= self.request results = view.filter_query(view.get_query()).all() assert len(results) == 0 def test_paginate_query(self): view =", "data view.put('test request', 'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args ==", "= User(id=2, name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def tearDownClass(cls): cls.dbsession.close() def setUp(self):", "= True self.list_call_args = (request, args, kwargs) def create(self, request, *args, **kwargs): self.called", "request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test arg', test_kwarg='test') assert view.called is", "MockRetrieveUpdateApiView() data = ('test 
request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg',", "def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True self.call_args", "view = MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request',", "= (User, 'id') def get_query(self): return self.request.dbsession.query(self.model) def get_schema_class(self, *args, **kwargs): return UserSchema", "('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') view.delete('test request',", "= (request, args, kwargs) view = MockUpdateApiView() data = ('test request', ('test arg',),", "from pyramid_restful import generics from pyramid_restful.filters import FieldFilter engine = create_engine('sqlite://') Base =", "view.called is True assert view.call_args == data def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def list(self,", "self.call_args = (request, args, kwargs) view = MockRetrieveApiView() data = ('test request', ('test", "self.request results = view.filter_query(view.get_query()).all() assert len(results) == 0 def test_paginate_query(self): view = UserAPIView()", "partial_update(self, request, *args, **kwargs): self.p_called = True self.p_call_args = (request, args, kwargs) view", "assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def", "arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.list_called is True assert", "test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True self.call_args =", "cls.dbsession = get_dbsession() user = User(id=1, name='testing') 
user2 = User(id=2, name='testing 2') cls.dbsession.add(user)", "*args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveApiView()", "generics from pyramid_restful.filters import FieldFilter engine = create_engine('sqlite://') Base = declarative_base() class User(Base):", "import HTTPNotFound from sqlalchemy import create_engine, Column, String, Integer from sqlalchemy.orm import sessionmaker", "request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.called is", "(request, args, kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg':", "self.request results = view.filter_query(view.get_query()).all() assert len(results) == 1 assert results[0].id == 1 def", "class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) == 0 def test_paginate_query(self): view", "def destroy(self, request, *args, **kwargs): self.d_called = True self.d_call_args = (request, args, kwargs)", "{'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args", "instance = view.get_object() assert isinstance(instance, User) assert instance.id == 1 assert instance.name ==", "sqlalchemy.orm.query import Query from marshmallow import Schema, fields from pyramid_restful import generics from", "self.partial_called = True self.partial_call_args = (request, args, kwargs) def update(self, request, *args, **kwargs):", "def list(self, request, *args, **kwargs): self.list_called = True self.list_call_args = (request, args, kwargs)", "MockListCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg',", "'test'}) view.get('test request', 'test 
arg', test_kwarg='test') assert view.list_called is True assert view.list_call_args ==", "*args, **kwargs): self.r_called = True self.r_call_args = (request, args, kwargs) def destroy(self, request,", "True assert view.d_call_args == data assert view.u_called is True assert view.u_call_args == data", "MockCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.post('test request', 'test arg',", "self.request query = view.get_query() assert isinstance(query, Query) def test_get_query_w_override(self): view = UserOverrideView() view.request", "User(id=1, name='testing') user2 = User(id=2, name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def tearDownClass(cls):", "= {'id': 3} self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self): view = UserAPIView() view.request = self.request", "view.get_object) def test_get_schema(self): view = UserAPIView() view.request = self.request schema = view.get_schema() assert", "UserAPIView() self.request.params = {'filter[name]': 'testing3'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results)", "kwargs) def partial_update(self, request, *args, **kwargs): self.p_called = True self.p_call_args = (request, args,", "import sessionmaker from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.query import Query from marshmallow import", "view.called is True assert view.call_args == data def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self,", "instance.id == 1 assert instance.name == 'testing' def test_get_object_not_found(self): view = UserAPIView() view.request", "MockListApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg',", "User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) class 
UserSchema(Schema):", "= True self.call_args = (request, args, kwargs) view = MockRetrieveUpdateApiView() data = ('test", "('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.called", "test_override_get_schema(self): view = UserOverrideView() view.request = self.request schema = view.get_schema() assert isinstance(schema, UserSchema)", "self.request view.lookup_url_kwargs = {'id': 3} self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self): view = UserAPIView() view.request", "kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.put('test", "*args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockListCreateApiView()", "test_kwarg='test') view.delete('test request', 'test arg', test_kwarg='test') view.put('test request', 'test arg', test_kwarg='test') view.patch('test request',", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_api_view_get(self):", "name='testing') user2 = User(id=2, name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def tearDownClass(cls): cls.dbsession.close()", "assert view.u_called is True assert view.u_call_args == data assert view.p_called is True assert", "view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count == 1 class ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def create(self,", "assert view.called is True assert view.call_args == data def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def", "self.partial_call_args = (request, args, kwargs) def update(self, request, *args, **kwargs): self.called = True", "'id') def get_query(self): return self.request.dbsession.query(self.model) def 
get_schema_class(self, *args, **kwargs): return UserSchema def get_dbsession():", "== data view.put('test request', 'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args", "request, *args, **kwargs): self.d_called = True self.d_call_args = (request, args, kwargs) def update(self,", "fields.Integer() name = fields.String() class UserAPIView(generics.GenericAPIView): model = User schema_class = UserSchema pagination_class", "self.called = True self.call_args = (request, args, kwargs) view = MockUpdateApiView() data =", "def test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def list(self, request, *args, **kwargs): self.list_called = True self.list_call_args", "UserSchema def get_dbsession(): Session = sessionmaker() Session.configure(bind=engine) return Session() class GenericAPIViewTests(TestCase): @classmethod def", "== data def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.called =", "view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_override_get_schema(self): view = UserOverrideView()", "*args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveDestroyUApiView()", "self.dbsession def test_get_query_w_model(self): view = UserAPIView() view.request = self.request query = view.get_query() assert", "arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.partial_called is True assert", "**kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveUpdateApiView() data", "= fields.String() class UserAPIView(generics.GenericAPIView): model = User schema_class = UserSchema pagination_class = mock.Mock()", "testing.DummyRequest() self.request.dbsession = self.dbsession def test_get_query_w_model(self): 
view = UserAPIView() view.request = self.request query", "data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request, *args, **kwargs): self.called = True", "= (request, args, kwargs) def update(self, request, *args, **kwargs): self.u_called = True self.u_call_args", "assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_override_get_schema(self): view = UserOverrideView() view.request", "= UserAPIView() view.request = self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count == 1 class ConcreteGenericAPIViewsTest(TestCase): def", "kwargs) view = MockRetrieveApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test", "cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def tearDownClass(cls): cls.dbsession.close() def setUp(self): self.request = testing.DummyRequest() self.request.dbsession", "kwargs) def update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "kwargs) view = MockListCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test", "(request, args, kwargs) view = MockCreateApiView() data = ('test request', ('test arg',), {'test_kwarg':", "view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request',", "UserAPIView() view.request = self.request schema = view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] ==", "assert view.call_args == data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs):", "assert view.paginator.get_paginated_response.call_count == 1 class ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self): class 
MockCreateApiView(generics.CreateAPIView): def create(self, request,", "view.filter_query(view.get_query()).all() assert len(results) == 1 assert results[0].id == 1 def test_filter_query_empty(self): view =", "lookup_column = (User, 'id') def get_query(self): return self.request.dbsession.query(self.model) def get_schema_class(self, *args, **kwargs): return", "True assert view.partial_call_args == data view.put('test request', 'test arg', test_kwarg='test') assert view.partial_called is", "= testing.DummyRequest() self.request.dbsession = self.dbsession def test_get_query_w_model(self): view = UserAPIView() view.request = self.request", "= get_dbsession() user = User(id=1, name='testing') user2 = User(id=2, name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2)", "arg', test_kwarg='test') view.delete('test request', 'test arg', test_kwarg='test') view.put('test request', 'test arg', test_kwarg='test') view.patch('test", "view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_filter_query(self): view = UserAPIView()", "MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "UserSchema) assert schema.context['request'] == self.request def test_override_get_schema(self): view = UserOverrideView() view.request = self.request", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_destroy_api_view_delete(self): class", "Column(Integer, primary_key=True) name = Column(String) class UserSchema(Schema): id = fields.Integer() name = fields.String()", "= ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert", "view.call_args == data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, *args, **kwargs): self.called", "fields.String() 
class UserAPIView(generics.GenericAPIView): model = User schema_class = UserSchema pagination_class = mock.Mock() filter_classes", "= ('test request', ('test arg',), {'test_kwarg': 'test'}) view.post('test request', 'test arg', test_kwarg='test') assert", "def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def list(self, request, *args, **kwargs): self.called = True self.call_args", "args, kwargs) view = MockListCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'})", "'testing3'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) == 0 def test_paginate_query(self):", "(request, args, kwargs) def update(self, request, *args, **kwargs): self.u_called = True self.u_call_args =", "args, kwargs) view = MockListApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'})", "UserAPIView() view.request = self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count == 1 class ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self):", "view.call_args == data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args, **kwargs): self.partial_called", "view.called is True assert view.call_args == data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self,", "assert view.call_args == data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, *args, **kwargs):", "**kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveApiView() data", "= True self.call_args = (request, args, kwargs) view = MockListCreateApiView() data = ('test", "model = User schema_class = UserSchema pagination_class = mock.Mock() filter_classes = (FieldFilter,) filter_fields", "Column, String, 
Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.query", "import Schema, fields from pyramid_restful import generics from pyramid_restful.filters import FieldFilter engine =", "assert view.r_called is True assert view.r_call_args == data assert view.d_called is True assert", "= fields.Integer() name = fields.String() class UserAPIView(generics.GenericAPIView): model = User schema_class = UserSchema", "update(self, request, *args, **kwargs): self.u_called = True self.u_call_args = (request, args, kwargs) def", "declarative_base from sqlalchemy.orm.query import Query from marshmallow import Schema, fields from pyramid_restful import", "= self.request schema = view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def", "arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test arg', test_kwarg='test') assert view.called is True assert", "assert view.call_args == data def test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request, *args, **kwargs):", "2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def tearDownClass(cls): cls.dbsession.close() def setUp(self): self.request = testing.DummyRequest()", "Query) def test_missing_model(self): view = generics.GenericAPIView() view.request = self.request self.assertRaises(AssertionError, view.get_query) def test_get_object(self):", "True assert view.call_args == data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args,", "query = view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count == 1 def test_no_paginator(self): view = UserOverrideView()", "request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', 
test_kwarg='test') view.delete('test request', 'test", "TestCase, mock from pyramid import testing from pyramid.httpexceptions import HTTPNotFound from sqlalchemy import", "Base.metadata.create_all(engine) cls.dbsession = get_dbsession() user = User(id=1, name='testing') user2 = User(id=2, name='testing 2')", "self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count == 1 class ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def", "UserOverrideView(generics.GenericAPIView): model = User lookup_column = (User, 'id') def get_query(self): return self.request.dbsession.query(self.model) def", "(request, args, kwargs) def create(self, request, *args, **kwargs): self.called = True self.call_args =", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_list_api_view_get(self): class", "('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.called is True", "args, kwargs) def destroy(self, request, *args, **kwargs): self.d_called = True self.d_call_args = (request,", "data def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request, *args, **kwargs): self.called = True", "= (request, args, kwargs) def destroy(self, request, *args, **kwargs): self.d_called = True self.d_call_args", "view.request = self.request query = view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count == 1 def test_no_paginator(self):", "Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name", "data assert view.d_called is True assert view.d_call_args == data assert view.u_called is True", "= (request, args, kwargs) def update(self, request, *args, **kwargs): self.called = True self.call_args", "view = UserAPIView() 
view.request = self.request schema = view.get_schema() assert isinstance(schema, UserSchema) assert", "view.partial_call_args == data def test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def list(self, request, *args, **kwargs): self.list_called", "id = fields.Integer() name = fields.String() class UserAPIView(generics.GenericAPIView): model = User schema_class =", "view.d_call_args == data assert view.u_called is True assert view.u_call_args == data assert view.p_called", "view = UserOverrideView() view.request = self.request query = view.get_query() assert isinstance(query, Query) def", "def list(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs)", "import Query from marshmallow import Schema, fields from pyramid_restful import generics from pyramid_restful.filters", "== 1 def test_no_paginator(self): view = UserOverrideView() view.request = self.request query = view.get_query()", "*args, **kwargs): self.partial_called = True self.partial_call_args = (request, args, kwargs) def update(self, request,", "True assert view.u_call_args == data assert view.p_called is True assert view.p_call_args == data", "self.request = testing.DummyRequest() self.request.dbsession = self.dbsession def test_get_query_w_model(self): view = UserAPIView() view.request =", "view.lookup_url_kwargs = {'id': 1} instance = view.get_object() assert isinstance(instance, User) assert instance.id ==", "cls.dbsession.commit() @classmethod def tearDownClass(cls): cls.dbsession.close() def setUp(self): self.request = testing.DummyRequest() self.request.dbsession = self.dbsession", "assert len(results) == 0 def test_paginate_query(self): view = UserAPIView() view.request = self.request query", "UserOverrideView() view.request = self.request schema = view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] ==", "view.partial_called is True assert view.partial_call_args == data def 
test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def list(self,", "def test_filter_query(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing'} view.request = self.request results", "request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.partial_called is", "True self.partial_call_args = (request, args, kwargs) def update(self, request, *args, **kwargs): self.called =", "user2 = User(id=2, name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def tearDownClass(cls): cls.dbsession.close() def", "assert instance.id == 1 assert instance.name == 'testing' def test_get_object_not_found(self): view = UserAPIView()", "def test_get_query_w_model(self): view = UserAPIView() view.request = self.request query = view.get_query() assert isinstance(query,", "request, *args, **kwargs): self.list_called = True self.list_call_args = (request, args, kwargs) def create(self,", "(request, args, kwargs) def destroy(self, request, *args, **kwargs): self.d_called = True self.d_call_args =", "primary_key=True) name = Column(String) class UserSchema(Schema): id = fields.Integer() name = fields.String() class", "assert instance.name == 'testing' def test_get_object_not_found(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs", "data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args, **kwargs): self.partial_called = True", "test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data view.put('test request', 'test arg',", "'testing'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) == 1 assert results[0].id", "test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def 
retrieve(self, request, *args, **kwargs): self.called = True self.call_args =", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_get(self):", "def test_filter_query_empty(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing3'} view.request = self.request results", "id = Column(Integer, primary_key=True) name = Column(String) class UserSchema(Schema): id = fields.Integer() name", "schema_class = UserSchema pagination_class = mock.Mock() filter_classes = (FieldFilter,) filter_fields = (User.name,) class", "data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs): self.called = True", "MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.r_called = True self.r_call_args = (request, args,", "view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.put('test request',", "True self.d_call_args = (request, args, kwargs) def update(self, request, *args, **kwargs): self.u_called =", "1 def test_no_paginator(self): view = UserOverrideView() view.request = self.request query = view.get_query() assert", "instance.name == 'testing' def test_get_object_not_found(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs =", "kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test", "assert view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def", "view.d_called is True assert view.d_call_args == data assert view.u_called is True assert view.u_call_args", "isinstance(instance, User) assert instance.id == 1 assert instance.name == 'testing' def test_get_object_override(self): 
view", "= {'filter[name]': 'testing'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) == 1", "UserOverrideView() view.request = self.request query = view.get_query() assert isinstance(query, Query) def test_missing_model(self): view", "= UserOverrideView() view.request = self.request query = view.get_query() assert view.paginate_query(query) == None def", "request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.called is", "'testing' def test_get_object_override(self): view = UserOverrideView() view.request = self.request view.lookup_url_kwargs = {'id': 1}", "def partial_update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs)", "view = UserOverrideView() view.request = self.request schema = view.get_schema() assert isinstance(schema, UserSchema) assert", "kwargs) view = MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test", "assert view.partial_call_args == data view.put('test request', 'test arg', test_kwarg='test') assert view.partial_called is True", "**kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockListCreateApiView() data", "= User schema_class = UserSchema pagination_class = mock.Mock() filter_classes = (FieldFilter,) filter_fields =", "data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True", "self.p_call_args = (request, args, kwargs) view = MockRetrieveUpdateDestroyAPIView() data = ('test request', ('test", "= (request, args, kwargs) view = MockListCreateApiView() data = ('test request', ('test arg',),", "self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveUpdateApiView() data =", "kwargs) def create(self, request, *args, **kwargs): self.called = True 
self.call_args = (request, args,", "test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request, *args, **kwargs): self.called = True self.call_args =", "**kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockListApiView() data", "test_get_query_w_model(self): view = UserAPIView() view.request = self.request query = view.get_query() assert isinstance(query, Query)", "assert view.call_args == data def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request, *args, **kwargs):", "*args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockCreateApiView()", "__tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) class UserSchema(Schema): id", "(request, args, kwargs) view = MockListApiView() data = ('test request', ('test arg',), {'test_kwarg':", "test_get_object_not_found(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 3} self.assertRaises(HTTPNotFound, view.get_object)", "'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args ==", "self.d_call_args = (request, args, kwargs) def update(self, request, *args, **kwargs): self.u_called = True", "test_kwarg='test') assert view.called is True assert view.call_args == data def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView):", "True self.u_call_args = (request, args, kwargs) def partial_update(self, request, *args, **kwargs): self.p_called =", "'test arg', test_kwarg='test') view.put('test request', 'test arg', test_kwarg='test') view.patch('test request', 'test arg', test_kwarg='test')", "create_engine, Column, String, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from", "**kwargs): self.called = True self.call_args = 
(request, args, kwargs) view = MockRetrieveDestroyUApiView() data", "testing from pyramid.httpexceptions import HTTPNotFound from sqlalchemy import create_engine, Column, String, Integer from", "class MockCreateApiView(generics.CreateAPIView): def create(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "def partial_update(self, request, *args, **kwargs): self.partial_called = True self.partial_call_args = (request, args, kwargs)", "= User lookup_column = (User, 'id') def get_query(self): return self.request.dbsession.query(self.model) def get_schema_class(self, *args,", "self.r_call_args = (request, args, kwargs) def destroy(self, request, *args, **kwargs): self.d_called = True", "= ('test request', ('test arg',), {'test_kwarg': 'test'}) view.put('test request', 'test arg', test_kwarg='test') assert", "is True assert view.call_args == data def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request,", "view.get('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data", "mock from pyramid import testing from pyramid.httpexceptions import HTTPNotFound from sqlalchemy import create_engine,", "view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance = view.get_object()", "assert view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def", "**kwargs): self.r_called = True self.r_call_args = (request, args, kwargs) def destroy(self, request, *args,", "('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.partial_called", "= self.request results = view.filter_query(view.get_query()).all() assert len(results) == 1 assert results[0].id == 1", "arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', 
test_kwarg='test') assert view.called is True assert", "request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view =", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_delete(self):", "request', 'test arg', test_kwarg='test') view.put('test request', 'test arg', test_kwarg='test') view.patch('test request', 'test arg',", "self.request def test_filter_query(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing'} view.request = self.request", "True self.list_call_args = (request, args, kwargs) def create(self, request, *args, **kwargs): self.called =", "pyramid_restful import generics from pyramid_restful.filters import FieldFilter engine = create_engine('sqlite://') Base = declarative_base()", "class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "from pyramid import testing from pyramid.httpexceptions import HTTPNotFound from sqlalchemy import create_engine, Column,", "self.request view.lookup_url_kwargs = {'id': 1} instance = view.get_object() assert isinstance(instance, User) assert instance.id", "args, kwargs) view = MockRetrieveApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'})", "= (request, args, kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',),", "UserAPIView(generics.GenericAPIView): model = User schema_class = UserSchema pagination_class = mock.Mock() filter_classes = (FieldFilter,)", "== self.request def test_override_get_schema(self): view = UserOverrideView() view.request = self.request schema = view.get_schema()", "kwargs) view = MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test", "class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True 
self.call_args = (request,", "Session.configure(bind=engine) return Session() class GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession = get_dbsession() user", "args, kwargs) def update(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "= Column(Integer, primary_key=True) name = Column(String) class UserSchema(Schema): id = fields.Integer() name =", "from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.query import Query from marshmallow import Schema, fields", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_patch(self):", "**kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockDestroyApiView() data", "= MockListApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test", "results = view.filter_query(view.get_query()).all() assert len(results) == 0 def test_paginate_query(self): view = UserAPIView() view.request", "view.lookup_url_kwargs = {'id': 3} self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self): view = UserAPIView() view.request =", "args, kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'})", "view.put('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data", "view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self,", "MockDestroyApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test arg',", "Schema, fields from pyramid_restful import generics from pyramid_restful.filters import FieldFilter engine = create_engine('sqlite://')", "kwargs) view = MockUpdateApiView() data = ('test request', ('test arg',), 
{'test_kwarg': 'test'}) view.patch('test", "view.request = self.request self.assertRaises(AssertionError, view.get_query) def test_get_object(self): view = UserAPIView() view.request = self.request", "view.call_args == data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs): self.called", "= self.request query = view.get_query() assert isinstance(query, Query) def test_missing_model(self): view = generics.GenericAPIView()", "test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data def test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView):", "assert view.called is True assert view.call_args == data def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def", "isinstance(query, Query) def test_missing_model(self): view = generics.GenericAPIView() view.request = self.request self.assertRaises(AssertionError, view.get_query) def", "= UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance = view.get_object() assert", "assert isinstance(instance, User) assert instance.id == 1 assert instance.name == 'testing' def test_get_object_not_found(self):", "marshmallow import Schema, fields from pyramid_restful import generics from pyramid_restful.filters import FieldFilter engine", "(User.name,) class UserOverrideView(generics.GenericAPIView): model = User lookup_column = (User, 'id') def get_query(self): return", "== 1 assert results[0].id == 1 def test_filter_query_empty(self): view = UserAPIView() self.request.params =", "view.post('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data", "(request, args, kwargs) def update(self, request, *args, **kwargs): self.called = True self.call_args =", "def retrieve(self, request, *args, **kwargs): self.r_called = True self.r_call_args = (request, args, kwargs)", 
"test_kwarg='test') view.patch('test request', 'test arg', test_kwarg='test') assert view.r_called is True assert view.r_call_args ==", "from pyramid_restful.filters import FieldFilter engine = create_engine('sqlite://') Base = declarative_base() class User(Base): __tablename__", "view.request = self.request view.lookup_url_kwargs = {'id': 3} self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self): view =", "= MockCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.post('test request', 'test", "User lookup_column = (User, 'id') def get_query(self): return self.request.dbsession.query(self.model) def get_schema_class(self, *args, **kwargs):", "(request, args, kwargs) view = MockListCreateApiView() data = ('test request', ('test arg',), {'test_kwarg':", "assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def", "self.r_called = True self.r_call_args = (request, args, kwargs) def destroy(self, request, *args, **kwargs):", "== data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request, *args, **kwargs): self.called =", "assert instance.id == 1 assert instance.name == 'testing' def test_get_object_override(self): view = UserOverrideView()", "MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "schema = view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_override_get_schema(self): view", "(request, args, kwargs) view = MockDestroyApiView() data = ('test request', ('test arg',), {'test_kwarg':", "def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs): 
self.called = True self.call_args", "class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "view.called is True assert view.call_args == data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self,", "{'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') view.delete('test request', 'test arg', test_kwarg='test') view.put('test", "from sqlalchemy.orm.query import Query from marshmallow import Schema, fields from pyramid_restful import generics", "self.call_args = (request, args, kwargs) view = MockListApiView() data = ('test request', ('test", "request', 'test arg', test_kwarg='test') view.delete('test request', 'test arg', test_kwarg='test') view.put('test request', 'test arg',", "mock.Mock() filter_classes = (FieldFilter,) filter_fields = (User.name,) class UserOverrideView(generics.GenericAPIView): model = User lookup_column", "= UserOverrideView() view.request = self.request query = view.get_query() assert isinstance(query, Query) def test_missing_model(self):", "= True self.call_args = (request, args, kwargs) view = MockRetrieveApiView() data = ('test", "list(self, request, *args, **kwargs): self.list_called = True self.list_call_args = (request, args, kwargs) def", "True self.call_args = (request, args, kwargs) view = MockRetrieveUpdateApiView() data = ('test request',", "test_filter_query(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing'} view.request = self.request results =", "def partial_update(self, request, *args, **kwargs): self.p_called = True self.p_call_args = (request, args, kwargs)", "def setUp(self): self.request = testing.DummyRequest() self.request.dbsession = self.dbsession def test_get_query_w_model(self): view = UserAPIView()", "get_dbsession(): Session = sessionmaker() Session.configure(bind=engine) return 
Session() class GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls): Base.metadata.create_all(engine)", "query = view.get_query() assert isinstance(query, Query) def test_get_query_w_override(self): view = UserOverrideView() view.request =", "results = view.filter_query(view.get_query()).all() assert len(results) == 1 assert results[0].id == 1 def test_filter_query_empty(self):", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_destroy_api_view(self): class", "Session() class GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession = get_dbsession() user = User(id=1,", "MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test arg',", "== 'testing' def test_get_object_not_found(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id':", "UserSchema pagination_class = mock.Mock() filter_classes = (FieldFilter,) filter_fields = (User.name,) class UserOverrideView(generics.GenericAPIView): model", "view.get_query() assert isinstance(query, Query) def test_get_query_w_override(self): view = UserOverrideView() view.request = self.request query", "assert len(results) == 1 assert results[0].id == 1 def test_filter_query_empty(self): view = UserAPIView()", "def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request, *args, **kwargs): self.called = True self.call_args", "import FieldFilter engine = create_engine('sqlite://') Base = declarative_base() class User(Base): __tablename__ = 'user'", "retrieve(self, request, *args, **kwargs): self.r_called = True self.r_call_args = (request, args, kwargs) def", "= declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name =", "= (request, args, kwargs) view = MockCreateApiView() 
data = ('test request', ('test arg',),", "FieldFilter engine = create_engine('sqlite://') Base = declarative_base() class User(Base): __tablename__ = 'user' id", "view.get('test request', 'test arg', test_kwarg='test') view.delete('test request', 'test arg', test_kwarg='test') view.put('test request', 'test", "test_get_schema(self): view = UserAPIView() view.request = self.request schema = view.get_schema() assert isinstance(schema, UserSchema)", "assert view.d_call_args == data assert view.u_called is True assert view.u_call_args == data assert", "assert results[0].id == 1 def test_filter_query_empty(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing3'}", "def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args", "unittest import TestCase, mock from pyramid import testing from pyramid.httpexceptions import HTTPNotFound from", "UserSchema(Schema): id = fields.Integer() name = fields.String() class UserAPIView(generics.GenericAPIView): model = User schema_class", "UserAPIView() view.request = self.request query = view.get_query() assert isinstance(query, Query) def test_get_query_w_override(self): view", "arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') view.delete('test request', 'test arg', test_kwarg='test')", "self.request schema = view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_filter_query(self):", "'user' id = Column(Integer, primary_key=True) name = Column(String) class UserSchema(Schema): id = fields.Integer()", "MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request',", 
"create_engine('sqlite://') Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True)", "String, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.query import", "arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data view.put('test request', 'test", "self.request.dbsession = self.dbsession def test_get_query_w_model(self): view = UserAPIView() view.request = self.request query =", "update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view", "import generics from pyramid_restful.filters import FieldFilter engine = create_engine('sqlite://') Base = declarative_base() class", "assert isinstance(query, Query) def test_missing_model(self): view = generics.GenericAPIView() view.request = self.request self.assertRaises(AssertionError, view.get_query)", "class UserSchema(Schema): id = fields.Integer() name = fields.String() class UserAPIView(generics.GenericAPIView): model = User", "engine = create_engine('sqlite://') Base = declarative_base() class User(Base): __tablename__ = 'user' id =", "{'test_kwarg': 'test'}) view.put('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args", "pyramid.httpexceptions import HTTPNotFound from sqlalchemy import create_engine, Column, String, Integer from sqlalchemy.orm import", "= {'filter[name]': 'testing3'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) == 0", "self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveDestroyUApiView() data =", "view.call_args == data def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request, *args, **kwargs): self.called", "test_kwarg='test') assert view.called is True assert view.call_args == data def 
test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView):", "view.called is True assert view.call_args == data def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self,", "assert view.call_args == data def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request, *args, **kwargs):", "True assert view.partial_call_args == data def test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def list(self, request, *args,", "is True assert view.call_args == data def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request,", "assert isinstance(instance, User) assert instance.id == 1 assert instance.name == 'testing' def test_get_object_override(self):", "True assert view.call_args == data def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request, *args,", "request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.list_called is", "self.call_args = (request, args, kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test", "view = UserOverrideView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance = view.get_object()", "view.request = self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count == 1 class ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self): class", "test_get_paginated_response(self): view = UserAPIView() view.request = self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count == 1 class", "{'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.called is True assert 
view.call_args", "args, kwargs) view = MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'})", "= view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_filter_query(self): view =", "= MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test", "*args, **kwargs): self.list_called = True self.list_call_args = (request, args, kwargs) def create(self, request,", "test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True self.call_args =", "def test_get_paginated_response(self): view = UserAPIView() view.request = self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count == 1", "isinstance(instance, User) assert instance.id == 1 assert instance.name == 'testing' def test_get_object_not_found(self): view", "cls.dbsession.close() def setUp(self): self.request = testing.DummyRequest() self.request.dbsession = self.dbsession def test_get_query_w_model(self): view =", "view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count == 1 def test_no_paginator(self): view = UserOverrideView() view.request =", "view = MockUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request',", "= MockListCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test", "('test arg',), {'test_kwarg': 'test'}) view.post('test request', 'test arg', test_kwarg='test') assert view.called is True", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_delete(self): class", "True assert view.call_args == data def test_retrieve_update_api_view_patch(self): class 
MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request, *args,", "view = MockCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.post('test request',", "GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession = get_dbsession() user = User(id=1, name='testing') user2", "= {'id': 1} instance = view.get_object() assert isinstance(instance, User) assert instance.id == 1", "view.paginate_query(query) == None def test_get_paginated_response(self): view = UserAPIView() view.request = self.request view.get_paginated_response({}) assert", "test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def list(self, request, *args, **kwargs): self.called = True self.call_args =", "('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.called", "= True self.call_args = (request, args, kwargs) view = MockDestroyApiView() data = ('test", "data def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def list(self, request, *args, **kwargs): self.called = True", "is True assert view.call_args == data def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request,", "User(id=2, name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def tearDownClass(cls): cls.dbsession.close() def setUp(self): self.request", "self.request.dbsession.query(self.model) def get_schema_class(self, *args, **kwargs): return UserSchema def get_dbsession(): Session = sessionmaker() Session.configure(bind=engine)", "= UserAPIView() view.request = self.request schema = view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request']", "= view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] == 
self.request def test_override_get_schema(self): view =", "{'test_kwarg': 'test'}) view.delete('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args", "Column(String) class UserSchema(Schema): id = fields.Integer() name = fields.String() class UserAPIView(generics.GenericAPIView): model =", "view = UserAPIView() view.request = self.request query = view.get_query() assert isinstance(query, Query) def", "test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def list(self, request, *args, **kwargs): self.list_called = True self.list_call_args =", "args, kwargs) view = MockUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'})", "return UserSchema def get_dbsession(): Session = sessionmaker() Session.configure(bind=engine) return Session() class GenericAPIViewTests(TestCase): @classmethod", "self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self): view = UserAPIView() view.request = self.request schema = view.get_schema()", "== data def test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.r_called =", "= (request, args, kwargs) view = MockRetrieveUpdateDestroyAPIView() data = ('test request', ('test arg',),", "'test arg', test_kwarg='test') assert view.r_called is True assert view.r_call_args == data assert view.d_called", "schema.context['request'] == self.request def test_override_get_schema(self): view = UserOverrideView() view.request = self.request schema =", "view = UserAPIView() view.request = self.request query = view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count ==", "test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView):", "True self.call_args = 
(request, args, kwargs) view = MockListApiView() data = ('test request',", "view.get_query() assert view.paginate_query(query) == None def test_get_paginated_response(self): view = UserAPIView() view.request = self.request", "def get_schema_class(self, *args, **kwargs): return UserSchema def get_dbsession(): Session = sessionmaker() Session.configure(bind=engine) return", "arg', test_kwarg='test') assert view.list_called is True assert view.list_call_args == data view.post('test request', 'test", "class UserAPIView(generics.GenericAPIView): model = User schema_class = UserSchema pagination_class = mock.Mock() filter_classes =", "data view.post('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args ==", "assert view.call_args == data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request, *args, **kwargs):", "kwargs) view = MockRetrieveUpdateDestroyAPIView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test", "*args, **kwargs): self.d_called = True self.d_call_args = (request, args, kwargs) def update(self, request,", "User schema_class = UserSchema pagination_class = mock.Mock() filter_classes = (FieldFilter,) filter_fields = (User.name,)", "== data view.post('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args", "*args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockListApiView()", "is True assert view.partial_call_args == data def test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def list(self, request,", "= self.dbsession def test_get_query_w_model(self): view = UserAPIView() view.request = self.request query = view.get_query()", "UserAPIView() view.request = self.request query = view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count == 1 def", 
"test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args =", "assert view.paginator.paginate_query.call_count == 1 def test_no_paginator(self): view = UserOverrideView() view.request = self.request query", "def update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs)", "True self.call_args = (request, args, kwargs) view = MockDestroyApiView() data = ('test request',", "assert view.d_called is True assert view.d_call_args == data assert view.u_called is True assert", "= (request, args, kwargs) def partial_update(self, request, *args, **kwargs): self.p_called = True self.p_call_args", "'test'}) view.post('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args ==", "sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.query import Query from marshmallow", "instance.name == 'testing' def test_get_object_override(self): view = UserOverrideView() view.request = self.request view.lookup_url_kwargs =", "assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def", "@classmethod def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession = get_dbsession() user = User(id=1, name='testing') user2 =", "model = User lookup_column = (User, 'id') def get_query(self): return self.request.dbsession.query(self.model) def get_schema_class(self,", "view.get_query() assert isinstance(query, Query) def test_missing_model(self): view = generics.GenericAPIView() view.request = self.request self.assertRaises(AssertionError,", "filter_classes = (FieldFilter,) filter_fields = (User.name,) class UserOverrideView(generics.GenericAPIView): model = User lookup_column =", "view.request = self.request query = 
view.get_query() assert view.paginate_query(query) == None def test_get_paginated_response(self): view", "MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg',", "data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.post('test request', 'test arg', test_kwarg='test')", "= (request, args, kwargs) view = MockDestroyApiView() data = ('test request', ('test arg',),", "view.r_call_args == data assert view.d_called is True assert view.d_call_args == data assert view.u_called", "True self.call_args = (request, args, kwargs) view = MockCreateApiView() data = ('test request',", "retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view", "assert view.call_args == data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs):", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_destroy_api_view_delete(self):", "view = UserAPIView() self.request.params = {'filter[name]': 'testing3'} view.request = self.request results = view.filter_query(view.get_query()).all()", "pagination_class = mock.Mock() filter_classes = (FieldFilter,) filter_fields = (User.name,) class UserOverrideView(generics.GenericAPIView): model =", "tearDownClass(cls): cls.dbsession.close() def setUp(self): self.request = testing.DummyRequest() self.request.dbsession = self.dbsession def test_get_query_w_model(self): view", "view.request = self.request view.lookup_url_kwargs = {'id': 1} instance = view.get_object() assert isinstance(instance, User)", "= (request, args, kwargs) view = MockRetrieveDestroyUApiView() data = ('test request', ('test arg',),", "is True assert view.call_args == data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request,", 
"Query) def test_get_query_w_override(self): view = UserOverrideView() view.request = self.request query = view.get_query() assert", "view.call_args == data def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.called", "def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession = get_dbsession() user = User(id=1, name='testing') user2 = User(id=2,", "**kwargs): self.d_called = True self.d_call_args = (request, args, kwargs) def update(self, request, *args,", "test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def create(self, request, *args, **kwargs): self.called = True self.call_args =", "== data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args, **kwargs): self.partial_called =", "test_get_object_override(self): view = UserOverrideView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance =", "user = User(id=1, name='testing') user2 = User(id=2, name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit() @classmethod", "view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 3} self.assertRaises(HTTPNotFound, view.get_object) def", "== None def test_get_paginated_response(self): view = UserAPIView() view.request = self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count", "assert view.paginate_query(query) == None def test_get_paginated_response(self): view = UserAPIView() view.request = self.request view.get_paginated_response({})", "def test_get_object_not_found(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 3} self.assertRaises(HTTPNotFound,", "kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test", 
"self.request.params = {'filter[name]': 'testing3'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) ==", "request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def", "('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.partial_called is True", "view = MockListCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request',", "args, kwargs) view = MockCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'})", "view.partial_called is True assert view.partial_call_args == data view.put('test request', 'test arg', test_kwarg='test') assert", "('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.list_called", "HTTPNotFound from sqlalchemy import create_engine, Column, String, Integer from sqlalchemy.orm import sessionmaker from", "(request, args, kwargs) view = MockRetrieveApiView() data = ('test request', ('test arg',), {'test_kwarg':", "view.get_query) def test_get_object(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 1}", "= (FieldFilter,) filter_fields = (User.name,) class UserOverrideView(generics.GenericAPIView): model = User lookup_column = (User,", "schema = view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_filter_query(self): view", "request, *args, **kwargs): self.partial_called = True self.partial_call_args = (request, args, kwargs) def update(self,", "kwargs) view = MockDestroyApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test", "= True self.r_call_args = (request, args, kwargs) def destroy(self, request, *args, **kwargs): self.d_called", "view.request = self.request schema = view.get_schema() assert isinstance(schema, UserSchema) 
assert schema.context['request'] == self.request", "True self.call_args = (request, args, kwargs) view = MockRetrieveDestroyUApiView() data = ('test request',", "pyramid import testing from pyramid.httpexceptions import HTTPNotFound from sqlalchemy import create_engine, Column, String,", "*args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockDestroyApiView()", "kwargs) view = MockCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.post('test", "assert view.partial_call_args == data def test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def list(self, request, *args, **kwargs):", "MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "args, kwargs) view = MockRetrieveUpdateDestroyAPIView() data = ('test request', ('test arg',), {'test_kwarg': 'test'})", "test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.r_called = True self.r_call_args =", "def test_get_object_override(self): view = UserOverrideView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance", "'test arg', test_kwarg='test') view.patch('test request', 'test arg', test_kwarg='test') assert view.r_called is True assert", "== data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, *args, **kwargs): self.called =", "len(results) == 0 def test_paginate_query(self): view = UserAPIView() view.request = self.request query =", "view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) == 1 assert results[0].id ==", "data def test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def list(self, request, *args, **kwargs): 
self.list_called = True", "{'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.list_called is True assert view.list_call_args", "data assert view.u_called is True assert view.u_call_args == data assert view.p_called is True", "UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 3} self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self): view", "MockListApiView(generics.ListAPIView): def list(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_get(self):", "(request, args, kwargs) def partial_update(self, request, *args, **kwargs): self.p_called = True self.p_call_args =", "== data def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def list(self, request, *args, **kwargs): self.called =", "self.request def test_override_get_schema(self): view = UserOverrideView() view.request = self.request schema = view.get_schema() assert", "def test_paginate_query(self): view = UserAPIView() view.request = self.request query = view.get_query() view.paginate_query(query) assert", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_get(self): class", "self.call_args = (request, args, kwargs) view = MockRetrieveDestroyUApiView() data = ('test request', ('test", "class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) class", "class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args ==", "self.call_args = (request, args, kwargs) view = MockDestroyApiView() data = ('test request', ('test", "('test arg',), 
{'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') view.delete('test request', 'test arg',", "None def test_get_paginated_response(self): view = UserAPIView() view.request = self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count ==", "results[0].id == 1 def test_filter_query_empty(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing3'} view.request", "class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "== data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs): self.called =", "request', 'test arg', test_kwarg='test') assert view.r_called is True assert view.r_call_args == data assert", "self.request query = view.get_query() assert isinstance(query, Query) def test_missing_model(self): view = generics.GenericAPIView() view.request", "= view.filter_query(view.get_query()).all() assert len(results) == 1 assert results[0].id == 1 def test_filter_query_empty(self): view", "= view.get_query() assert isinstance(query, Query) def test_get_query_w_override(self): view = UserOverrideView() view.request = self.request", "**kwargs): self.u_called = True self.u_call_args = (request, args, kwargs) def partial_update(self, request, *args,", "{'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args", "class MockListCreateApiView(generics.ListCreateAPIView): def list(self, request, *args, **kwargs): self.list_called = True self.list_call_args = (request,", "assert isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_filter_query(self): view = UserAPIView() self.request.params", "= view.filter_query(view.get_query()).all() assert len(results) == 0 def 
test_paginate_query(self): view = UserAPIView() view.request =", "view.list_called is True assert view.list_call_args == data view.post('test request', 'test arg', test_kwarg='test') assert", "self.u_call_args = (request, args, kwargs) def partial_update(self, request, *args, **kwargs): self.p_called = True", "view.put('test request', 'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data", "sessionmaker from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.query import Query from marshmallow import Schema,", "self.u_called = True self.u_call_args = (request, args, kwargs) def partial_update(self, request, *args, **kwargs):", "isinstance(query, Query) def test_get_query_w_override(self): view = UserOverrideView() view.request = self.request query = view.get_query()", "args, kwargs) def create(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "def get_query(self): return self.request.dbsession.query(self.model) def get_schema_class(self, *args, **kwargs): return UserSchema def get_dbsession(): Session", "UserOverrideView() view.request = self.request query = view.get_query() assert view.paginate_query(query) == None def test_get_paginated_response(self):", "== 1 def test_filter_query_empty(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing3'} view.request =", "True assert view.call_args == data def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args,", "= ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') view.delete('test", "assert view.call_args == data def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args, **kwargs):", "request', ('test arg',), {'test_kwarg': 'test'}) view.post('test request', 
'test arg', test_kwarg='test') assert view.called is", "0 def test_paginate_query(self): view = UserAPIView() view.request = self.request query = view.get_query() view.paginate_query(query)", "setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession = get_dbsession() user = User(id=1, name='testing') user2 = User(id=2, name='testing", "test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView):", "args, kwargs) def update(self, request, *args, **kwargs): self.u_called = True self.u_call_args = (request,", "= True self.call_args = (request, args, kwargs) view = MockListApiView() data = ('test", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_update_api_view_partial_update(self): class", "{'filter[name]': 'testing'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) == 1 assert", "import create_engine, Column, String, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base", "= UserSchema pagination_class = mock.Mock() filter_classes = (FieldFilter,) filter_fields = (User.name,) class UserOverrideView(generics.GenericAPIView):", "test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView):", "*args, **kwargs): return UserSchema def get_dbsession(): Session = sessionmaker() Session.configure(bind=engine) return Session() class", "== data def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request, *args, **kwargs): self.called =", "partial_update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view", "arg', test_kwarg='test') assert view.r_called is True assert view.r_call_args == data assert 
view.d_called is", "True assert view.call_args == data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request, *args,", "class ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def create(self, request, *args, **kwargs): self.called =", "data def test_retrieve_api_view_get(self): class MockRetrieveApiView(generics.RetrieveAPIView): def retrieve(self, request, *args, **kwargs): self.called = True", "True self.call_args = (request, args, kwargs) view = MockUpdateApiView() data = ('test request',", "assert view.list_called is True assert view.list_call_args == data view.post('test request', 'test arg', test_kwarg='test')", "= self.request query = view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count == 1 def test_no_paginator(self): view", "kwargs) view = MockListApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test", "assert view.r_call_args == data assert view.d_called is True assert view.d_call_args == data assert", "test_kwarg='test') assert view.called is True assert view.call_args == data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView):", "def create(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs)", "test_kwarg='test') assert view.r_called is True assert view.r_call_args == data assert view.d_called is True", "True assert view.call_args == data def test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request, *args,", "'test'}) view.delete('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args ==", "UserSchema) assert schema.context['request'] == self.request def test_filter_query(self): view = UserAPIView() 
self.request.params = {'filter[name]':", "def test_get_query_w_override(self): view = UserOverrideView() view.request = self.request query = view.get_query() assert isinstance(query,", "view = generics.GenericAPIView() view.request = self.request self.assertRaises(AssertionError, view.get_query) def test_get_object(self): view = UserAPIView()", "args, kwargs) def partial_update(self, request, *args, **kwargs): self.p_called = True self.p_call_args = (request,", "setUp(self): self.request = testing.DummyRequest() self.request.dbsession = self.dbsession def test_get_query_w_model(self): view = UserAPIView() view.request", "def test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.r_called = True self.r_call_args", "== 1 assert instance.name == 'testing' def test_get_object_not_found(self): view = UserAPIView() view.request =", "test_filter_query_empty(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing3'} view.request = self.request results =", "arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data def test_list_create_api_view(self): class", "class UserOverrideView(generics.GenericAPIView): model = User lookup_column = (User, 'id') def get_query(self): return self.request.dbsession.query(self.model)", "== data assert view.u_called is True assert view.u_call_args == data assert view.p_called is", "Session = sessionmaker() Session.configure(bind=engine) return Session() class GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession", "MockRetrieveUpdateDestroyAPIView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg',", "'testing' def test_get_object_not_found(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 3}", "from pyramid.httpexceptions 
import HTTPNotFound from sqlalchemy import create_engine, Column, String, Integer from sqlalchemy.orm", "from sqlalchemy import create_engine, Column, String, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative", "view.request = self.request query = view.get_query() assert isinstance(query, Query) def test_missing_model(self): view =", "is True assert view.u_call_args == data assert view.p_called is True assert view.p_call_args ==", "test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView):", "data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.put('test request', 'test arg', test_kwarg='test')", "request, *args, **kwargs): self.u_called = True self.u_call_args = (request, args, kwargs) def partial_update(self,", "Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.query import Query", "1 assert instance.name == 'testing' def test_get_object_not_found(self): view = UserAPIView() view.request = self.request", "*args, **kwargs): self.p_called = True self.p_call_args = (request, args, kwargs) view = MockRetrieveUpdateDestroyAPIView()", "UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance = view.get_object() assert isinstance(instance,", "def test_no_paginator(self): view = UserOverrideView() view.request = self.request query = view.get_query() assert view.paginate_query(query)", "MockCreateApiView(generics.CreateAPIView): def create(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "view = MockRetrieveUpdateDestroyAPIView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request',", "is True assert view.list_call_args == data view.post('test request', 'test arg', test_kwarg='test') assert view.called", "view.delete('test 
request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data", "arg', test_kwarg='test') view.put('test request', 'test arg', test_kwarg='test') view.patch('test request', 'test arg', test_kwarg='test') assert", "get_dbsession() user = User(id=1, name='testing') user2 = User(id=2, name='testing 2') cls.dbsession.add(user) cls.dbsession.add(user2) cls.dbsession.commit()", "True self.call_args = (request, args, kwargs) view = MockRetrieveApiView() data = ('test request',", "*args, **kwargs): self.u_called = True self.u_call_args = (request, args, kwargs) def partial_update(self, request,", "= UserAPIView() self.request.params = {'filter[name]': 'testing3'} view.request = self.request results = view.filter_query(view.get_query()).all() assert", "class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request,", "= view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count == 1 def test_no_paginator(self): view = UserOverrideView() view.request", "ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def create(self, request, *args, **kwargs): self.called = True", "self.request.params = {'filter[name]': 'testing'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) ==", "assert view.list_call_args == data view.post('test request', 'test arg', test_kwarg='test') assert view.called is True", "== 0 def test_paginate_query(self): view = UserAPIView() view.request = self.request query = view.get_query()", "assert view.called is True assert view.call_args == data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def", "instance.id == 1 assert instance.name == 'testing' def test_get_object_override(self): view = UserOverrideView() view.request", "= True 
self.partial_call_args = (request, args, kwargs) def update(self, request, *args, **kwargs): self.called", "= UserAPIView() view.request = self.request query = view.get_query() assert isinstance(query, Query) def test_get_query_w_override(self):", "= MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test", "view = UserAPIView() view.request = self.request view.get_paginated_response({}) assert view.paginator.get_paginated_response.call_count == 1 class ConcreteGenericAPIViewsTest(TestCase):", "view.paginate_query(query) assert view.paginator.paginate_query.call_count == 1 def test_no_paginator(self): view = UserOverrideView() view.request = self.request", "self.called = True self.call_args = (request, args, kwargs) view = MockDestroyApiView() data =", "isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_override_get_schema(self): view = UserOverrideView() view.request =", "'test'}) view.get('test request', 'test arg', test_kwarg='test') view.delete('test request', 'test arg', test_kwarg='test') view.put('test request',", "arg',), {'test_kwarg': 'test'}) view.put('test request', 'test arg', test_kwarg='test') assert view.called is True assert", "def get_dbsession(): Session = sessionmaker() Session.configure(bind=engine) return Session() class GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls):", "list(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view", "test_kwarg='test') assert view.called is True assert view.call_args == data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView):", "view = MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request',", "('test request', ('test arg',), {'test_kwarg': 'test'}) view.put('test request', 'test arg', test_kwarg='test') assert view.called", 
"view.list_call_args == data view.post('test request', 'test arg', test_kwarg='test') assert view.called is True assert", "def retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs)", "= MockRetrieveUpdateDestroyAPIView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test", "= UserAPIView() view.request = self.request query = view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count == 1", "view.get('test request', 'test arg', test_kwarg='test') assert view.list_called is True assert view.list_call_args == data", "@classmethod def tearDownClass(cls): cls.dbsession.close() def setUp(self): self.request = testing.DummyRequest() self.request.dbsession = self.dbsession def", "= UserOverrideView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance = view.get_object() assert", "= MockUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test", "self.called = True self.call_args = (request, args, kwargs) view = MockCreateApiView() data =", "== data def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request, *args, **kwargs): self.called =", "self.request self.assertRaises(AssertionError, view.get_query) def test_get_object(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs =", "= True self.p_call_args = (request, args, kwargs) view = MockRetrieveUpdateDestroyAPIView() data = ('test", "is True assert view.call_args == data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request,", "MockRetrieveApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg',", "= generics.GenericAPIView() view.request = self.request 
self.assertRaises(AssertionError, view.get_query) def test_get_object(self): view = UserAPIView() view.request", "MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True self.call_args = (request, args,", "view.called is True assert view.call_args == data def test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self,", "= (request, args, kwargs) view = MockListApiView() data = ('test request', ('test arg',),", "assert view.partial_called is True assert view.partial_call_args == data def test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def", "= (request, args, kwargs) def create(self, request, *args, **kwargs): self.called = True self.call_args", "self.request query = view.get_query() assert view.paginate_query(query) == None def test_get_paginated_response(self): view = UserAPIView()", "'test arg', test_kwarg='test') assert view.list_called is True assert view.list_call_args == data view.post('test request',", "= True self.u_call_args = (request, args, kwargs) def partial_update(self, request, *args, **kwargs): self.p_called", "test_paginate_query(self): view = UserAPIView() view.request = self.request query = view.get_query() view.paginate_query(query) assert view.paginator.paginate_query.call_count", "= mock.Mock() filter_classes = (FieldFilter,) filter_fields = (User.name,) class UserOverrideView(generics.GenericAPIView): model = User", "view.paginator.get_paginated_response.call_count == 1 class ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def create(self, request, *args,", "('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test arg', test_kwarg='test') assert view.called", "('test request', ('test arg',), {'test_kwarg': 'test'}) view.post('test request', 'test arg', 
test_kwarg='test') assert view.called", "True assert view.r_call_args == data assert view.d_called is True assert view.d_call_args == data", "test_kwarg='test') view.put('test request', 'test arg', test_kwarg='test') view.patch('test request', 'test arg', test_kwarg='test') assert view.r_called", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_get(self): class", "self.called = True self.call_args = (request, args, kwargs) view = MockListCreateApiView() data =", "is True assert view.call_args == data def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def list(self, request,", "assert view.partial_called is True assert view.partial_call_args == data view.put('test request', 'test arg', test_kwarg='test')", "view.call_args == data def test_list_api_view_get(self): class MockListApiView(generics.ListAPIView): def list(self, request, *args, **kwargs): self.called", "test_get_query_w_override(self): view = UserOverrideView() view.request = self.request query = view.get_query() assert isinstance(query, Query)", "= create_engine('sqlite://') Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer,", "query = view.get_query() assert view.paginate_query(query) == None def test_get_paginated_response(self): view = UserAPIView() view.request", "1 assert results[0].id == 1 def test_filter_query_empty(self): view = UserAPIView() self.request.params = {'filter[name]':", "view.get_object() assert isinstance(instance, User) assert instance.id == 1 assert instance.name == 'testing' def", "view.request = self.request query = view.get_query() assert isinstance(query, Query) def test_get_query_w_override(self): view =", "cls.dbsession.add(user2) cls.dbsession.commit() @classmethod def tearDownClass(cls): cls.dbsession.close() def setUp(self): self.request = testing.DummyRequest() self.request.dbsession =", "== 1 class ConcreteGenericAPIViewsTest(TestCase): def 
test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def create(self, request, *args, **kwargs):", "isinstance(schema, UserSchema) assert schema.context['request'] == self.request def test_filter_query(self): view = UserAPIView() self.request.params =", "= (request, args, kwargs) view = MockRetrieveApiView() data = ('test request', ('test arg',),", "is True assert view.call_args == data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request,", "('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.list_called is True", "view.call_args == data def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request, *args, **kwargs): self.called", "data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test')", "('test arg',), {'test_kwarg': 'test'}) view.put('test request', 'test arg', test_kwarg='test') assert view.called is True", "= UserOverrideView() view.request = self.request schema = view.get_schema() assert isinstance(schema, UserSchema) assert schema.context['request']", "= self.request self.assertRaises(AssertionError, view.get_query) def test_get_object(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs", "view.delete('test request', 'test arg', test_kwarg='test') view.put('test request', 'test arg', test_kwarg='test') view.patch('test request', 'test", "test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView):", "assert instance.name == 'testing' def test_get_object_override(self): view = UserOverrideView() view.request = self.request view.lookup_url_kwargs", "True assert view.list_call_args == data view.post('test request', 'test 
arg', test_kwarg='test') assert view.called is", "'test arg', test_kwarg='test') view.delete('test request', 'test arg', test_kwarg='test') view.put('test request', 'test arg', test_kwarg='test')", "True self.p_call_args = (request, args, kwargs) view = MockRetrieveUpdateDestroyAPIView() data = ('test request',", "self.call_args = (request, args, kwargs) view = MockUpdateApiView() data = ('test request', ('test", "'test arg', test_kwarg='test') assert view.partial_called is True assert view.partial_call_args == data def test_list_create_api_view(self):", "name = Column(String) class UserSchema(Schema): id = fields.Integer() name = fields.String() class UserAPIView(generics.GenericAPIView):", "from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.query import Query from", "= MockDestroyApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test", "test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs): self.called = True self.call_args =", "self.d_called = True self.d_call_args = (request, args, kwargs) def update(self, request, *args, **kwargs):", "= sessionmaker() Session.configure(bind=engine) return Session() class GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession =", "self.list_called = True self.list_call_args = (request, args, kwargs) def create(self, request, *args, **kwargs):", "filter_fields = (User.name,) class UserOverrideView(generics.GenericAPIView): model = User lookup_column = (User, 'id') def", "= MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test", "= True self.call_args = (request, args, kwargs) view = MockCreateApiView() data = ('test", "data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test 
request', 'test arg', test_kwarg='test')", "def update(self, request, *args, **kwargs): self.u_called = True self.u_call_args = (request, args, kwargs)", "1 class ConcreteGenericAPIViewsTest(TestCase): def test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def create(self, request, *args, **kwargs): self.called", "'test'}) view.put('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args ==", "= view.get_query() assert view.paginate_query(query) == None def test_get_paginated_response(self): view = UserAPIView() view.request =", "return Session() class GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession = get_dbsession() user =", "= UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 3} self.assertRaises(HTTPNotFound, view.get_object) def test_get_schema(self):", "create(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view", "{'filter[name]': 'testing3'} view.request = self.request results = view.filter_query(view.get_query()).all() assert len(results) == 0 def", "is True assert view.call_args == data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request,", "== data def test_list_create_api_view(self): class MockListCreateApiView(generics.ListCreateAPIView): def list(self, request, *args, **kwargs): self.list_called =", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_patch(self): class", "== data assert view.d_called is True assert view.d_call_args == data assert view.u_called is", "view.patch('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data", "data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, 
*args, **kwargs): self.called = True", "{'test_kwarg': 'test'}) view.post('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args", "*args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockUpdateApiView()", "('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test arg', test_kwarg='test') assert view.called is True", "True assert view.call_args == data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args,", "arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_api_view_get(self): class", "destroy(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view", "sessionmaker() Session.configure(bind=engine) return Session() class GenericAPIViewTests(TestCase): @classmethod def setUpClass(cls): Base.metadata.create_all(engine) cls.dbsession = get_dbsession()", "assert schema.context['request'] == self.request def test_override_get_schema(self): view = UserOverrideView() view.request = self.request schema", "= view.get_object() assert isinstance(instance, User) assert instance.id == 1 assert instance.name == 'testing'", "is True assert view.call_args == data def test_retrieve_update_destroy_api_view(self): class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView): def retrieve(self, request,", "test_get_object(self): view = UserAPIView() view.request = self.request view.lookup_url_kwargs = {'id': 1} instance =", "view.u_called is True assert view.u_call_args == data assert view.p_called is True assert view.p_call_args", "def test_get_schema(self): view = UserAPIView() view.request = self.request schema = view.get_schema() assert isinstance(schema,", "assert schema.context['request'] == self.request def test_filter_query(self): view = UserAPIView() self.request.params = {'filter[name]': 
'testing'}", "1 def test_filter_query_empty(self): view = UserAPIView() self.request.params = {'filter[name]': 'testing3'} view.request = self.request", "is True assert view.call_args == data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request,", "arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.called is True assert", "view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self,", "request, *args, **kwargs): self.r_called = True self.r_call_args = (request, args, kwargs) def destroy(self,", "= 'user' id = Column(Integer, primary_key=True) name = Column(String) class UserSchema(Schema): id =", "request', 'test arg', test_kwarg='test') view.patch('test request', 'test arg', test_kwarg='test') assert view.r_called is True", "self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveApiView() data =", "*args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveUpdateApiView()", "len(results) == 1 assert results[0].id == 1 def test_filter_query_empty(self): view = UserAPIView() self.request.params" ]
[ "Preprocessor class CoreNlpPreprocessor(Preprocessor): \"\"\"A preprocessor that invokes a Stanford CoreNLP server for analysis.\"\"\"", "many gate servers must be ' 'specified as there are processes.') return super(CoreNlpPreprocessor,", "not in the main one. \"\"\" if not self.corenlp: self.corenlp = CoreNLP(self.corenlp_props) def", "process_id=1, **kwargs): mod_args = dict(kwargs) mod_args['corenlp_props'] = kwargs['corenlp_props'].replace( '%', str(process_id)) if process_id >", "the input_stream a batch of sentences, and yields the parsed data chunk. \"\"\"", "self.corenlp.parse(text) @classmethod def instantiate(cls, process_id=1, **kwargs): mod_args = dict(kwargs) mod_args['corenlp_props'] = kwargs['corenlp_props'].replace( '%',", "> 1 and mod_args['corenlp_props'] == kwargs['corenlp_props']: raise ValueError('At least as many gate servers", "== kwargs['corenlp_props']: raise ValueError('At least as many gate servers must be ' 'specified", "input_stream: text += txt if len(text) > self.max_length: yield self.corenlp.parse(text) text = ''", "self.corenlp: del self.corenlp self.corenlp = None def preprocess(self, input_stream, output_stream): for chunk, parsed", "mod_args = dict(kwargs) mod_args['corenlp_props'] = kwargs['corenlp_props'].replace( '%', str(process_id)) if process_id > 1 and", "'CoreNLP preprocessor' def __init__(self, corenlp_props, max_length=10000): self.corenlp_props = corenlp_props self.max_length = max_length self.corenlp", "only created once, in the processing process, not in the main one. 
\"\"\"", "<reponame>DavidNemeskey/emLam<gh_stars>1-10 #!/usr/bin/env python3 \"\"\"A preprocessor that invokes a Stanford CoreNLP server for analysis.\"\"\"", "self.corenlp: self.corenlp = CoreNLP(self.corenlp_props) def cleanup(self): if self.corenlp: del self.corenlp self.corenlp = None", "a Stanford CoreNLP server for analysis.\"\"\" from __future__ import absolute_import, division, print_function from", "The CoreNLP server is initialized here so that it is only created once,", "= '' for txt in input_stream: text += txt if len(text) > self.max_length:", "None def preprocess(self, input_stream, output_stream): for chunk, parsed in enumerate(self.__parse_with_corenlp(input_stream)): if chunk >", "# Preserve the empty sentence separator line between chunks print(u'', file=output_stream) print(u'\\n\\n'.join(u'\\n'.join(u'\\t'.join(token) for", "the processing process, not in the main one. \"\"\" if not self.corenlp: self.corenlp", "for token in sent) for sent in parsed), file=output_stream) def __parse_with_corenlp(self, input_stream): \"\"\"", "self.corenlp.parse(text) text = '' if text: yield self.corenlp.parse(text) @classmethod def instantiate(cls, process_id=1, **kwargs):", "max_length self.corenlp = None def initialize(self): \"\"\" The CoreNLP server is initialized here", "self.corenlp = None def preprocess(self, input_stream, output_stream): for chunk, parsed in enumerate(self.__parse_with_corenlp(input_stream)): if", "parsed in enumerate(self.__parse_with_corenlp(input_stream)): if chunk > 0: # Preserve the empty sentence separator", "txt if len(text) > self.max_length: yield self.corenlp.parse(text) text = '' if text: yield", "__future__ import absolute_import, division, print_function from emLam.corenlp import CoreNLP from emLam.corpus.preprocessor_base import Preprocessor", "Stanford CoreNLP server for analysis.\"\"\" from __future__ import absolute_import, division, print_function from emLam.corenlp", "the main one. 
\"\"\" if not self.corenlp: self.corenlp = CoreNLP(self.corenlp_props) def cleanup(self): if", "is initialized here so that it is only created once, in the processing", "str(process_id)) if process_id > 1 and mod_args['corenlp_props'] == kwargs['corenlp_props']: raise ValueError('At least as", "from emLam.corenlp import CoreNLP from emLam.corpus.preprocessor_base import Preprocessor class CoreNlpPreprocessor(Preprocessor): \"\"\"A preprocessor that", "0: # Preserve the empty sentence separator line between chunks print(u'', file=output_stream) print(u'\\n\\n'.join(u'\\n'.join(u'\\t'.join(token)", "**kwargs): mod_args = dict(kwargs) mod_args['corenlp_props'] = kwargs['corenlp_props'].replace( '%', str(process_id)) if process_id > 1", "that invokes a Stanford CoreNLP server for analysis.\"\"\" NAME = 'CoreNLP' DESCRIPTION =", "def cleanup(self): if self.corenlp: del self.corenlp self.corenlp = None def preprocess(self, input_stream, output_stream):", "input_stream): \"\"\" Parses the input with CoreNLP. This generator is called from preprocess().", "with CoreNLP. This generator is called from preprocess(). Reads from the input_stream a", "a batch of sentences, and yields the parsed data chunk. 
\"\"\" text =", "Reads from the input_stream a batch of sentences, and yields the parsed data", "in input_stream: text += txt if len(text) > self.max_length: yield self.corenlp.parse(text) text =", "emLam.corpus.preprocessor_base import Preprocessor class CoreNlpPreprocessor(Preprocessor): \"\"\"A preprocessor that invokes a Stanford CoreNLP server", "absolute_import, division, print_function from emLam.corenlp import CoreNLP from emLam.corpus.preprocessor_base import Preprocessor class CoreNlpPreprocessor(Preprocessor):", "for analysis.\"\"\" from __future__ import absolute_import, division, print_function from emLam.corenlp import CoreNLP from", "from __future__ import absolute_import, division, print_function from emLam.corenlp import CoreNLP from emLam.corpus.preprocessor_base import", "input_stream, output_stream): for chunk, parsed in enumerate(self.__parse_with_corenlp(input_stream)): if chunk > 0: # Preserve", "= CoreNLP(self.corenlp_props) def cleanup(self): if self.corenlp: del self.corenlp self.corenlp = None def preprocess(self,", "for chunk, parsed in enumerate(self.__parse_with_corenlp(input_stream)): if chunk > 0: # Preserve the empty", "Parses the input with CoreNLP. This generator is called from preprocess(). Reads from", "preprocessor that invokes a Stanford CoreNLP server for analysis.\"\"\" from __future__ import absolute_import,", "corenlp_props, max_length=10000): self.corenlp_props = corenlp_props self.max_length = max_length self.corenlp = None def initialize(self):", "from the input_stream a batch of sentences, and yields the parsed data chunk.", "'' if text: yield self.corenlp.parse(text) @classmethod def instantiate(cls, process_id=1, **kwargs): mod_args = dict(kwargs)", "from emLam.corpus.preprocessor_base import Preprocessor class CoreNlpPreprocessor(Preprocessor): \"\"\"A preprocessor that invokes a Stanford CoreNLP", "from preprocess(). 
Reads from the input_stream a batch of sentences, and yields the", "input_stream a batch of sentences, and yields the parsed data chunk. \"\"\" text", "generator is called from preprocess(). Reads from the input_stream a batch of sentences,", "CoreNLP(self.corenlp_props) def cleanup(self): if self.corenlp: del self.corenlp self.corenlp = None def preprocess(self, input_stream,", "DESCRIPTION = 'CoreNLP preprocessor' def __init__(self, corenlp_props, max_length=10000): self.corenlp_props = corenlp_props self.max_length =", "import Preprocessor class CoreNlpPreprocessor(Preprocessor): \"\"\"A preprocessor that invokes a Stanford CoreNLP server for", "CoreNLP server for analysis.\"\"\" from __future__ import absolute_import, division, print_function from emLam.corenlp import", "here so that it is only created once, in the processing process, not", "file=output_stream) print(u'\\n\\n'.join(u'\\n'.join(u'\\t'.join(token) for token in sent) for sent in parsed), file=output_stream) def __parse_with_corenlp(self,", "called from preprocess(). Reads from the input_stream a batch of sentences, and yields", "data chunk. 
class CoreNlpPreprocessor(Preprocessor):
    """A preprocessor that invokes a Stanford CoreNLP server for analysis."""
    NAME = 'CoreNLP'
    DESCRIPTION = 'CoreNLP preprocessor'

    def __init__(self, corenlp_props, max_length=10000):
        """
        Args:
            corenlp_props: the CoreNLP properties (passed verbatim to the
                           CoreNLP wrapper). May contain a '%' placeholder
                           that instantiate() replaces with the process id.
            max_length: the approximate maximum number of characters batched
                        up before a parse() call is issued to the server.
        """
        self.corenlp_props = corenlp_props
        self.max_length = max_length
        self.corenlp = None

    def initialize(self):
        """
        The CoreNLP server is initialized here so that it is only created
        once, in the processing process, not in the main one.
        """
        if not self.corenlp:
            self.corenlp = CoreNLP(self.corenlp_props)

    def cleanup(self):
        """Releases the CoreNLP server wrapper, if one was created."""
        if self.corenlp:
            # Dropping the reference is sufficient; the separate `del` the
            # original performed before rebinding was a no-op.
            self.corenlp = None

    def preprocess(self, input_stream, output_stream):
        """
        Parses input_stream with CoreNLP and writes the result to
        output_stream: one token per line with tab-separated fields,
        sentences separated by empty lines.
        """
        for chunk, parsed in enumerate(self.__parse_with_corenlp(input_stream)):
            if chunk > 0:
                # Preserve the empty sentence separator line between chunks
                print(u'', file=output_stream)
            print(u'\n\n'.join(u'\n'.join(u'\t'.join(token) for token in sent)
                               for sent in parsed),
                  file=output_stream)

    def __parse_with_corenlp(self, input_stream):
        """
        Parses the input with CoreNLP. This generator is called from
        preprocess(). Reads from the input_stream a batch of sentences, and
        yields the parsed data chunk.
        """
        # Accumulate the pieces in a list and join once per batch: repeated
        # `text += txt` string concatenation is quadratic in the batch size.
        texts, length = [], 0
        for txt in input_stream:
            texts.append(txt)
            length += len(txt)
            if length > self.max_length:
                yield self.corenlp.parse(u''.join(texts))
                texts, length = [], 0
        if texts:
            # Flush the final, possibly short, batch.
            yield self.corenlp.parse(u''.join(texts))

    @classmethod
    def instantiate(cls, process_id=1, **kwargs):
        """
        Creates a preprocessor for worker process_id. The '%' placeholder in
        corenlp_props is replaced with the process id so that each worker
        process talks to its own server instance.

        Raises:
            ValueError: if there is more than one process but corenlp_props
                        contains no '%' placeholder, i.e. all processes would
                        share one server configuration.
        """
        mod_args = dict(kwargs)
        mod_args['corenlp_props'] = kwargs['corenlp_props'].replace(
            '%', str(process_id))
        if process_id > 1 and mod_args['corenlp_props'] == kwargs['corenlp_props']:
            # The original message said "gate servers" -- a copy-paste error
            # from the GATE preprocessor; this class drives CoreNLP servers.
            raise ValueError('At least as many CoreNLP servers must be '
                             'specified as there are processes.')
        return super(CoreNlpPreprocessor, cls).instantiate(process_id,
                                                           **mod_args)