Dataset columns:

  column            type            values
  id                int32           0 – 252k
  repo              stringlengths   7 – 55
  path              stringlengths   4 – 127
  func_name         stringlengths   1 – 88
  original_string   stringlengths   75 – 19.8k
  language          stringclasses   1 value
  code              stringlengths   51 – 19.8k
  code_tokens       list
  docstring         stringlengths   3 – 17.3k
  docstring_tokens  list
  sha               stringlengths   40 – 40
  url               stringlengths   87 – 242
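The schema above matches a CodeSearchNet-style function/docstring corpus. As a rough illustration of how such a table can be inspected, here is a minimal sketch using the Hugging Face datasets library; the dataset identifier is a placeholder, not a confirmed source for these rows.

from datasets import load_dataset

# "user/codesearchnet-python" is a hypothetical dataset id; substitute the real one
ds = load_dataset("user/codesearchnet-python", split="train")
row = ds[0]
for col in ("repo", "path", "func_name", "sha", "url"):
    print(col, "=>", row[col])
print(row["code"][:200])  # first 200 characters of the function body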
224,600
ewels/MultiQC
multiqc/utils/log.py
move_tmp_log
python
def move_tmp_log(logger):
    """ Move the temporary log file to the MultiQC data directory
    if it exists. """
    try:
        # https://stackoverflow.com/questions/15435652/python-does-not-release-filehandles-to-logfile
        logging.shutdown()
        shutil.move(log_tmp_fn, os.path.join(config.data_dir, 'multiqc.log'))
        util_functions.robust_rmtree(log_tmp_dir)
    except (AttributeError, TypeError, IOError):
        pass
[ "def", "move_tmp_log", "(", "logger", ")", ":", "try", ":", "# https://stackoverflow.com/questions/15435652/python-does-not-release-filehandles-to-logfile", "logging", ".", "shutdown", "(", ")", "shutil", ".", "move", "(", "log_tmp_fn", ",", "os", ".", "path", ".", "j...
Move the temporary log file to the MultiQC data directory if it exists.
[ "Move", "the", "temporary", "log", "file", "to", "the", "MultiQC", "data", "directory", "if", "it", "exists", "." ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/utils/log.py#L55-L65
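The logging.shutdown() call before shutil.move is the point of the linked Stack Overflow thread: logging handlers keep the log file open, and on some platforms an open file cannot be moved or deleted. A standalone sketch of the same pattern (file names here are illustrative, not MultiQC's):

import logging, os, shutil, tempfile

tmp_dir = tempfile.mkdtemp()
tmp_log = os.path.join(tmp_dir, "run.log")
logging.basicConfig(filename=tmp_log, level=logging.INFO)
logging.info("work done")

logging.shutdown()                 # flush and close all handlers first
shutil.move(tmp_log, "final.log")  # now the file handle is released
shutil.rmtree(tmp_dir)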
224,601
ewels/MultiQC
multiqc/utils/log.py
get_log_stream
python
def get_log_stream(logger):
    """
    Returns a stream to the root log file.
    If there is no logfile return the stderr log stream

    Returns:
        A stream to the root log file or stderr stream.
    """
    file_stream = None
    log_stream = None
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            file_stream = handler.stream
        else:
            log_stream = handler.stream
    if file_stream:
        return file_stream
    return log_stream
[ "def", "get_log_stream", "(", "logger", ")", ":", "file_stream", "=", "None", "log_stream", "=", "None", "for", "handler", "in", "logger", ".", "handlers", ":", "if", "isinstance", "(", "handler", ",", "logging", ".", "FileHandler", ")", ":", "file_stream", ...
Returns a stream to the root log file. If there is no logfile return the stderr log stream Returns: A stream to the root log file or stderr stream.
[ "Returns", "a", "stream", "to", "the", "root", "log", "file", ".", "If", "there", "is", "no", "logfile", "return", "the", "stderr", "log", "stream" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/utils/log.py#L68-L88
224,602
ewels/MultiQC
multiqc/modules/jellyfish/jellyfish.py
MultiqcModule.parse_jellyfish_data
python
def parse_jellyfish_data(self, f):
    """ Go through the hist file and memorise it """
    histogram = {}
    occurence = 0
    for line in f['f']:
        line = line.rstrip('\n')
        occurence = int(line.split(" ")[0])
        count = int(line.split(" ")[1])
        histogram[occurence] = occurence * count
    # delete the last occurrence, as it is the sum of all k-mers occurring more often than it
    del histogram[occurence]
    # sanity check
    self.max_key = max(histogram, key=histogram.get)
    self.jellyfish_max_x = max(self.jellyfish_max_x, self.max_key)
    if len(histogram) > 0:
        if f['s_name'] in self.jellyfish_data:
            log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
        self.add_data_source(f)
        self.jellyfish_data[f['s_name']] = histogram
[ "def", "parse_jellyfish_data", "(", "self", ",", "f", ")", ":", "histogram", "=", "{", "}", "occurence", "=", "0", "for", "line", "in", "f", "[", "'f'", "]", ":", "line", "=", "line", ".", "rstrip", "(", "'\\n'", ")", "occurence", "=", "int", "(", ...
Go through the hist file and memorise it
[ "Go", "through", "the", "hist", "file", "and", "memorise", "it" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/jellyfish/jellyfish.py#L40-L58
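For context, a `jellyfish histo` file has two space-separated columns per line: the k-mer multiplicity and how many distinct k-mers have that multiplicity. A hedged illustration of what the parser above computes, with made-up numbers:

lines = ["1 941073", "2 182311", "3 339089"]  # made-up jellyfish .hist rows
histogram = {}
for line in lines:
    occ, count = (int(x) for x in line.split(" "))
    histogram[occ] = occ * count  # total k-mer observations at this multiplicity
del histogram[occ]                # jellyfish's last row aggregates everything >= occ
print(histogram)                  # {1: 941073, 2: 364622}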
224,603
ewels/MultiQC
multiqc/modules/jellyfish/jellyfish.py
MultiqcModule.frequencies_plot
python
def frequencies_plot(self, xmin=0, xmax=200):
    """ Generate the qualities plot """
    helptext = '''
    A possible way to assess the complexity of a library even in
    absence of a reference sequence is to look at the kmer profile of the reads.
    The idea is to count all the kmers (_i.e._, sequence of length `k`) that occur
    in the reads. In this way it is possible to know how many kmers occur
    `1,2,.., N` times and represent this as a plot.
    This plot tells us, for each x, how many k-mers (y-axis) are present in the
    dataset in exactly x copies.
    In an ideal world (no errors in sequencing, no bias, no repeated regions)
    this plot should be as close as possible to a Gaussian distribution.
    In reality we will always see a peak for `x=1` (_i.e._, the errors)
    and another peak close to the expected coverage. If the genome is highly
    heterozygous a second peak at half of the coverage can be expected.'''

    pconfig = {
        'id': 'Jellyfish_kmer_plot',
        'title': 'Jellyfish: K-mer plot',
        'ylab': 'Counts',
        'xlab': 'k-mer frequency',
        'xDecimals': False,
        'xmin': xmin,
        'xmax': xmax
    }

    self.add_section(
        anchor = 'jellyfish_kmer_plot',
        description = 'The K-mer plot lets you estimate library complexity and coverage from k-mer content.',
        helptext = helptext,
        plot = linegraph.plot(self.jellyfish_data, pconfig)
    )
[ "def", "frequencies_plot", "(", "self", ",", "xmin", "=", "0", ",", "xmax", "=", "200", ")", ":", "helptext", "=", "'''\n A possible way to assess the complexity of a library even in\n absence of a reference sequence is to look at the kmer profile of the reads.\...
Generate the qualities plot
[ "Generate", "the", "qualities", "plot" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/jellyfish/jellyfish.py#L61-L94
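The helptext describes counting how often each k-mer occurs across all reads and histogramming those multiplicities. As a toy illustration of that idea (not MultiQC or Jellyfish code), k-mer counting in pure Python looks like this:

from collections import Counter

def kmer_histogram(reads, k):
    """Count k-mer multiplicities, then histogram the multiplicities."""
    kmers = Counter()
    for read in reads:
        for i in range(len(read) - k + 1):
            kmers[read[i:i + k]] += 1
    # how many distinct k-mers occur exactly 1, 2, ... times
    return Counter(kmers.values())

reads = ["ACGTACGT", "ACGTTGCA"]  # toy reads
print(kmer_histogram(reads, 4))   # Counter({1: 7, 3: 1})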
224,604
ewels/MultiQC
multiqc/modules/rseqc/infer_experiment.py
parse_reports
python
def parse_reports(self):
    """ Find RSeQC infer_experiment reports and parse their data """

    # Set up vars
    self.infer_exp = dict()
    regexes = {
        'pe_sense': r"\"1\+\+,1--,2\+-,2-\+\": (\d\.\d+)",
        'pe_antisense': r"\"1\+-,1-\+,2\+\+,2--\": (\d\.\d+)",
        'se_sense': r"\"\+\+,--\": (\d\.\d+)",
        'se_antisense': r"\+-,-\+\": (\d\.\d+)",
        'failed': r"Fraction of reads failed to determine: (\d\.\d+)"
    }

    # Go through files and parse data using regexes
    for f in self.find_log_files('rseqc/infer_experiment'):
        d = dict()
        for k, r in regexes.items():
            r_search = re.search(r, f['f'], re.MULTILINE)
            if r_search:
                d[k] = float(r_search.group(1))
        if len(d) > 0:
            if f['s_name'] in self.infer_exp:
                log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
            self.add_data_source(f, section='infer_experiment')
            self.infer_exp[f['s_name']] = d

    # Filter to strip out ignored sample names
    self.infer_exp = self.ignore_samples(self.infer_exp)

    if len(self.infer_exp) > 0:

        # Write to file
        self.write_data_file(self.infer_exp, 'multiqc_rseqc_infer_experiment')

        # Merge PE and SE for plot
        pdata = dict()
        for s_name, vals in self.infer_exp.items():
            pdata[s_name] = dict()
            for k, v in vals.items():
                v *= 100.0  # Multiply to get percentage
                if k[:2] == 'pe' or k[:2] == 'se':
                    k = k[3:]
                pdata[s_name][k] = v + pdata[s_name].get(k, 0)

        # Plot bar graph of groups
        keys = OrderedDict()
        keys['sense'] = {'name': "Sense"}
        keys['antisense'] = {'name': "Antisense"}
        keys['failed'] = {'name': "Undetermined"}

        # Config for the plot
        pconfig = {
            'id': 'rseqc_infer_experiment_plot',
            'title': 'RSeQC: Infer experiment',
            'ylab': '% Tags',
            'ymin': 0,
            'ymax': 100,
            'tt_percentages': False,
            'ylab_format': '{value}%',
            'cpswitch': False
        }

        self.add_section(
            name = 'Infer experiment',
            anchor = 'rseqc-infer_experiment',
            description = '<a href="http://rseqc.sourceforge.net/#infer-experiment-py" target="_blank">Infer experiment</a>' \
                " counts the percentage of reads and read pairs that match the strandedness of overlapping transcripts." \
                " It can be used to infer whether RNA-seq library preps are stranded (sense or antisense).",
            plot = bargraph.plot(pdata, keys, pconfig)
        )

    # Return number of samples found
    return len(self.infer_exp)
[ "def", "parse_reports", "(", "self", ")", ":", "# Set up vars", "self", ".", "infer_exp", "=", "dict", "(", ")", "regexes", "=", "{", "'pe_sense'", ":", "r\"\\\"1\\+\\+,1--,2\\+-,2-\\+\\\": (\\d\\.\\d+)\"", ",", "'pe_antisense'", ":", "r\"\\\"1\\+-,1-\\+,2\\+\\+,2--\\\"...
Find RSeQC infer_experiment reports and parse their data
[ "Find", "RSeQC", "infer_experiment", "reports", "and", "parse", "their", "data" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/rseqc/infer_experiment.py#L16-L89
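The regexes above target the fraction lines printed by RSeQC's infer_experiment.py. A small self-contained demonstration against a plausible (made-up) paired-end report:

import re

# Made-up excerpt in the style of infer_experiment.py output
report = '''This is PairEnd Data
Fraction of reads failed to determine: 0.0172
Fraction of reads explained by "1++,1--,2+-,2-+": 0.4903
Fraction of reads explained by "1+-,1-+,2++,2--": 0.4925
'''

pe_sense = re.search(r"\"1\+\+,1--,2\+-,2-\+\": (\d\.\d+)", report, re.MULTILINE)
print(float(pe_sense.group(1)))  # 0.4903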
224,605
ewels/MultiQC
multiqc/modules/leehom/leehom.py
MultiqcModule.parse_leehom_logs
python
def parse_leehom_logs(self, f):
    """ Go through log file looking for leehom output """
    regexes = {
        'total': r"Total reads[\s\:]+(\d+)",
        'merged_trimming': r"Merged \(trimming\)\s+(\d+)",
        'merged_overlap': r"Merged \(overlap\)\s+(\d+)",
        'kept': r"Kept PE/SR\s+(\d+)",
        'trimmed': r"Trimmed SR\s+(\d+)",
        'adapter_dimers_chimeras': r"Adapter dimers/chimeras\s+(\d+)",
        'failed_key': r"Failed Key\s+(\d+)"
    }
    parsed_data = dict()
    for l in f['f']:
        # Search regexes for overview stats
        for k, r in regexes.items():
            match = re.search(r, l)
            if match:
                parsed_data[k] = int(match.group(1))
    return parsed_data
[ "def", "parse_leehom_logs", "(", "self", ",", "f", ")", ":", "regexes", "=", "{", "'total'", ":", "r\"Total reads[\\s\\:]+(\\d+)\"", ",", "'merged_trimming'", ":", "r\"Merged \\(trimming\\)\\s+(\\d+)\"", ",", "'merged_overlap'", ":", "r\"Merged \\(overlap\\)\\s+(\\d+)\"", ...
Go through log file looking for leehom output
[ "Go", "through", "log", "file", "looking", "for", "leehom", "output" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/leehom/leehom.py#L51-L69
224,606
ewels/MultiQC
multiqc/modules/leehom/leehom.py
MultiqcModule.leehom_general_stats_table
python
def leehom_general_stats_table(self):
    """ Take the parsed stats from the leeHom report and add it to the
    basic stats table at the top of the report """
    headers = {}
    headers['merged_trimming'] = {
        'title': '{} Merged (Trimming)'.format(config.read_count_prefix),
        'description': 'Merged clusters from trimming ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuRd',
        'modify': lambda x: x * config.read_count_multiplier,
        'shared_key': 'read_count'
    }
    headers['merged_overlap'] = {
        'title': '{} Merged (Overlap)'.format(config.read_count_prefix),
        'description': 'Merged clusters from overlapping reads ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuRd',
        'modify': lambda x: x * config.read_count_multiplier,
        'shared_key': 'read_count'
    }
    self.general_stats_addcols(self.leehom_data, headers)
[ "def", "leehom_general_stats_table", "(", "self", ")", ":", "headers", "=", "{", "}", "headers", "[", "'merged_trimming'", "]", "=", "{", "'title'", ":", "'{} Merged (Trimming)'", ".", "format", "(", "config", ".", "read_count_prefix", ")", ",", "'description'",...
Take the parsed stats from the leeHom report and add it to the basic stats table at the top of the report
[ "Take", "the", "parsed", "stats", "from", "the", "leeHom", "report", "and", "add", "it", "to", "the", "basic", "stats", "table", "at", "the", "top", "of", "the", "report" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/leehom/leehom.py#L72-L93
224,607
ewels/MultiQC
multiqc/modules/dedup/dedup.py
MultiqcModule.dedup_general_stats_table
python
def dedup_general_stats_table(self):
    """ Take the parsed stats from the DeDup report and add it to the
    basic stats table at the top of the report """
    headers = OrderedDict()
    headers['duplication_rate'] = {
        'title': 'Duplication Rate',
        'description': 'Percentage of reads categorised as a technical duplicate',
        'min': 0,
        'max': 100,
        'suffix': '%',
        'scale': 'OrRd',
        'format': '{:,.0f}',
        'modify': lambda x: x * 100.0
    }
    self.general_stats_addcols(self.dedup_data, headers)
[ "def", "dedup_general_stats_table", "(", "self", ")", ":", "headers", "=", "OrderedDict", "(", ")", "headers", "[", "'duplication_rate'", "]", "=", "{", "'title'", ":", "'Duplication Rate'", ",", "'description'", ":", "'Percentage of reads categorised as a technical dup...
Take the parsed stats from the DeDup report and add it to the basic stats table at the top of the report
[ "Take", "the", "parsed", "stats", "from", "the", "DeDup", "report", "and", "add", "it", "to", "the", "basic", "stats", "table", "at", "the", "top", "of", "the", "report" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/dedup/dedup.py#L83-L98
224,608
ewels/MultiQC
multiqc/modules/macs2/macs2.py
MultiqcModule.macs_filtered_reads_plot
python
def macs_filtered_reads_plot(self):
    """ Plot of filtered reads for control and treatment samples """
    data = dict()
    req_cats = ['control_fragments_total', 'control_fragments_after_filtering',
                'treatment_fragments_total', 'treatment_fragments_after_filtering']
    for s_name, d in self.macs_data.items():
        if all([c in d for c in req_cats]):
            data['{}: Control'.format(s_name)] = dict()
            data['{}: Treatment'.format(s_name)] = dict()
            data['{}: Control'.format(s_name)]['fragments_filtered'] = d['control_fragments_total'] - d['control_fragments_after_filtering']
            data['{}: Control'.format(s_name)]['fragments_not_filtered'] = d['control_fragments_after_filtering']
            data['{}: Treatment'.format(s_name)]['fragments_filtered'] = d['treatment_fragments_total'] - d['treatment_fragments_after_filtering']
            data['{}: Treatment'.format(s_name)]['fragments_not_filtered'] = d['treatment_fragments_after_filtering']

    # Check that we have something to plot
    if len(data) == 0:
        return

    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['fragments_not_filtered'] = { 'color': '#437BB1', 'name': 'Remaining fragments' }
    keys['fragments_filtered'] = { 'color': '#B1084C', 'name': 'Filtered fragments' }

    # Config for the plot
    pconfig = {
        'id': 'macs2_filtered',
        'title': 'MACS2: Filtered Fragments',
        'ylab': '# Fragments',
        'cpswitch_counts_label': 'Number of Fragments',
        'hide_zero_cats': False
    }

    self.add_section( plot = bargraph.plot(data, keys, pconfig) )
[ "def", "macs_filtered_reads_plot", "(", "self", ")", ":", "data", "=", "dict", "(", ")", "req_cats", "=", "[", "'control_fragments_total'", ",", "'control_fragments_after_filtering'", ",", "'treatment_fragments_total'", ",", "'treatment_fragments_after_filtering'", "]", "...
Plot of filtered reads for control and treatment samples
[ "Plot", "of", "filtered", "reads", "for", "control", "and", "treatment", "samples" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/macs2/macs2.py#L101-L134
224,609
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.split_log
python
def split_log(logf):
    """split concat log into individual samples"""
    flashpatt = re.compile(
        r'\[FLASH\] Fast Length Adjustment of SHort reads\n(.+?)\[FLASH\] FLASH',
        flags=re.DOTALL)
    return flashpatt.findall(logf)
[ "def", "split_log", "(", "logf", ")", ":", "flashpatt", "=", "re", ".", "compile", "(", "r'\\[FLASH\\] Fast Length Adjustment of SHort reads\\n(.+?)\\[FLASH\\] FLASH'", ",", "flags", "=", "re", ".", "DOTALL", ")", "return", "flashpatt", ".", "findall", "(", "logf", ...
split concat log into individual samples
[ "split", "concat", "log", "into", "individual", "samples" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L67-L71
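The re.DOTALL flag makes `.` match newlines, so `(.+?)` lazily captures everything between one FLASH banner and the next `[FLASH] FLASH` version line. A made-up two-sample concatenated log shows the effect:

import re

logf = ("[FLASH] Fast Length Adjustment of SHort reads\n"
        "sample A stats...\n"
        "[FLASH] FLASH v1.2.11 complete\n"
        "[FLASH] Fast Length Adjustment of SHort reads\n"
        "sample B stats...\n"
        "[FLASH] FLASH v1.2.11 complete\n")

flashpatt = re.compile(
    r'\[FLASH\] Fast Length Adjustment of SHort reads\n(.+?)\[FLASH\] FLASH',
    flags=re.DOTALL)
print(flashpatt.findall(logf))  # ['sample A stats...\n', 'sample B stats...\n']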
224,610
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.get_field
python
def get_field(field, slog, fl=False):
    """parse sample log for field

    set fl=True to return a float
    otherwise, returns int
    """
    field += r'\:\s+([\d\.]+)'
    match = re.search(field, slog)
    if match:
        if fl:
            return float(match.group(1))
        return int(match.group(1))
    return 0
[ "def", "get_field", "(", "field", ",", "slog", ",", "fl", "=", "False", ")", ":", "field", "+=", "r'\\:\\s+([\\d\\.]+)'", "match", "=", "re", ".", "search", "(", "field", ",", "slog", ")", "if", "match", ":", "if", "fl", ":", "return", "float", "(", ...
parse sample log for field set fl=True to return a float otherwise, returns int
[ "parse", "sample", "log", "for", "field", "set", "fl", "=", "True", "to", "return", "a", "float", "otherwise", "returns", "int" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L74-L85
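A quick usage sketch, assuming get_field is available as a plain function as defined above, against made-up lines in the style of a FLASh log:

slog = "[FLASH]     Total pairs:      49575\n[FLASH]     Percent combined: 92.47%"
print(get_field('Total pairs', slog))                # 49575
print(get_field('Percent combined', slog, fl=True))  # 92.47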
224,611
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.clean_pe_name
python
def clean_pe_name(self, nlog, root):
    """additional name cleaning for paired end data"""
    use_output_name = getattr(config, 'flash', {}).get('use_output_name', False)
    if use_output_name:
        name = re.search(r'Output files\:\n\[FLASH\]\s+(.+?)\n', nlog)
    else:
        name = re.search(r'Input files\:\n\[FLASH\]\s+(.+?)\n', nlog)
    if not name:
        return None
    name = name.group(1)
    name = self.clean_s_name(name, root)
    return name
[ "def", "clean_pe_name", "(", "self", ",", "nlog", ",", "root", ")", ":", "use_output_name", "=", "getattr", "(", "config", ",", "'flash'", ",", "{", "}", ")", ".", "get", "(", "'use_output_name'", ",", "False", ")", "if", "use_output_name", ":", "name", ...
additional name cleaning for paired end data
[ "additional", "name", "cleaning", "for", "paired", "end", "data" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L87-L98
224,612
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.parse_flash_log
python
def parse_flash_log(self, logf):
    """parse flash logs"""
    data = OrderedDict()
    samplelogs = self.split_log(logf['f'])
    for slog in samplelogs:
        try:
            sample = dict()
            ## Sample name ##
            s_name = self.clean_pe_name(slog, logf['root'])
            if s_name is None:
                continue
            sample['s_name'] = s_name

            ## Log attributes ##
            sample['totalpairs'] = self.get_field('Total pairs', slog)
            sample['discardpairs'] = self.get_field('Discarded pairs', slog)
            sample['percdiscard'] = self.get_field('Percent Discarded', slog, fl=True)
            sample['combopairs'] = self.get_field('Combined pairs', slog)
            sample['inniepairs'] = self.get_field('Innie pairs', slog)
            sample['outiepairs'] = self.get_field('Outie pairs', slog)
            sample['uncombopairs'] = self.get_field('Uncombined pairs', slog)
            sample['perccombo'] = self.get_field('Percent combined', slog, fl=True)

            data[s_name] = sample
        except Exception as err:
            log.warning("Error parsing record in {}. {}".format(logf['fn'], err))
            log.debug(traceback.format_exc())
            continue
    return data
[ "def", "parse_flash_log", "(", "self", ",", "logf", ")", ":", "data", "=", "OrderedDict", "(", ")", "samplelogs", "=", "self", ".", "split_log", "(", "logf", "[", "'f'", "]", ")", "for", "slog", "in", "samplelogs", ":", "try", ":", "sample", "=", "di...
parse flash logs
[ "parse", "flash", "logs" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L100-L128
224,613
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.stats_table
python
def stats_table(self, data):
    """Add percent combined to general stats table"""
    headers = OrderedDict()
    headers['combopairs'] = {
        'title': 'Combined pairs',
        'description': 'Num read pairs combined',
        'shared_key': 'read_count',
        'hidden': True,
        'scale': False
    }
    headers['perccombo'] = {
        'title': '% Combined',
        'description': '% read pairs combined',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'PiYG'
    }
    self.general_stats_addcols(data, headers)
[ "def", "stats_table", "(", "self", ",", "data", ")", ":", "headers", "=", "OrderedDict", "(", ")", "headers", "[", "'combopairs'", "]", "=", "{", "'title'", ":", "'Combined pairs'", ",", "'description'", ":", "'Num read pairs combined'", ",", "'shared_key'", "...
Add percent combined to general stats table
[ "Add", "percent", "combined", "to", "general", "stats", "table" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L130-L148
224,614
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.summary_plot
python
def summary_plot(data):
    """Barplot of combined pairs"""
    # category display order matters, so build the mapping as an OrderedDict directly
    cats = OrderedDict([
        ('inniepairs', {
            'name': 'Combined innie pairs',
            'color': '#191970'
        }),
        ('outiepairs', {
            'name': 'Combined outie pairs',
            'color': '#00A08A'
        }),
        ('uncombopairs', {
            'name': 'Uncombined pairs',
            'color': '#cd1076'
        }),
        ('discardpairs', {
            'name': 'Discarded pairs',
            'color': '#ffd700'
        })
    ])
    splotconfig = {'id': 'flash_combo_stats_plot',
                   'title': 'FLASh: Read combination statistics',
                   'ylab': 'Number of read pairs',
                   'hide_zero_cats': False}
    return bargraph.plot(data, cats, splotconfig)
[ "def", "summary_plot", "(", "data", ")", ":", "cats", "=", "OrderedDict", "(", ")", "cats", "=", "{", "'inniepairs'", ":", "{", "'name'", ":", "'Combined innie pairs'", ",", "'color'", ":", "'#191970'", "}", ",", "'outiepairs'", ":", "{", "'name'", ":", ...
Barplot of combined pairs
[ "Barplot", "of", "combined", "pairs" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L151-L176
224,615
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.parse_hist_files
python
def parse_hist_files(histf):
    """parse histogram files"""
    nameddata = dict()
    data = dict()
    try:
        for l in histf['f'].splitlines():
            s = l.split()
            if s:
                if len(s) != 2:
                    raise RuntimeError("invalid format: " + str(len(s)) +
                                       " column(s) found in row. must be exactly 2.")
                data[int(s[0])] = int(s[1])
    except Exception as err:
        log.warning("Error parsing %s. %s", histf['fn'], err)
        log.debug(traceback.format_exc())
    else:
        if data:
            nameddata[histf['s_name']] = data
        else:
            log.debug("%s is empty.", histf['fn'])
    finally:
        return nameddata
[ "def", "parse_hist_files", "(", "histf", ")", ":", "nameddata", "=", "dict", "(", ")", "data", "=", "dict", "(", ")", "try", ":", "for", "l", "in", "histf", "[", "'f'", "]", ".", "splitlines", "(", ")", ":", "s", "=", "l", ".", "split", "(", ")...
parse histogram files
[ "parse", "histogram", "files" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L179-L199
224,616
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.get_colors
python
def get_colors(n):
    """get colors for freqpoly graph"""
    cb_palette = ["#E69F00", "#56B4E9", "#009E73", "#F0E442",
                  "#0072B2", "#D55E00", "#CC79A7", "#001F3F",
                  "#0074D9", "#7FDBFF", "#39CCCC", "#3D9970",
                  "#2ECC40", "#01FF70", "#FFDC00", "#FF851B",
                  "#FF4136", "#F012BE", "#B10DC9", "#85144B",
                  "#AAAAAA", "#000000"]
    whole = int(n / 22)
    extra = (n % 22)
    cols = cb_palette * whole
    if extra >= 0:
        cols.extend(cb_palette[0:extra])
    return cols
[ "def", "get_colors", "(", "n", ")", ":", "cb_palette", "=", "[", "\"#E69F00\"", ",", "\"#56B4E9\"", ",", "\"#009E73\"", ",", "\"#F0E442\"", ",", "\"#0072B2\"", ",", "\"#D55E00\"", ",", "\"#CC79A7\"", ",", "\"#001F3F\"", ",", "\"#0074D9\"", ",", "\"#7FDBFF\"", ...
get colors for freqpoly graph
[ "get", "colors", "for", "freqpoly", "graph" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L202-L214
224,617
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.freqpoly_plot
python
def freqpoly_plot(data):
    """make freqpoly plot of merged read lengths"""
    rel_data = OrderedDict()
    for key, val in data.items():
        tot = sum(val.values(), 0)
        rel_data[key] = {k: v / tot for k, v in val.items()}
    fplotconfig = {
        'data_labels': [
            {'name': 'Absolute', 'ylab': 'Frequency', 'xlab': 'Merged Read Length'},
            {'name': 'Relative', 'ylab': 'Relative Frequency', 'xlab': 'Merged Read Length'}
        ],
        'id': 'flash_freqpoly_plot',
        'title': 'FLASh: Frequency of merged read lengths',
        'colors': dict(zip(data.keys(), MultiqcModule.get_colors(len(data))))
    }
    return linegraph.plot([data, rel_data], fplotconfig)
[ "def", "freqpoly_plot", "(", "data", ")", ":", "rel_data", "=", "OrderedDict", "(", ")", "for", "key", ",", "val", "in", "data", ".", "items", "(", ")", ":", "tot", "=", "sum", "(", "val", ".", "values", "(", ")", ",", "0", ")", "rel_data", "[", ...
make freqpoly plot of merged read lengths
[ "make", "freqpoly", "plot", "of", "merged", "read", "lengths" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L217-L231
224,618
ewels/MultiQC
multiqc/modules/flash/flash.py
MultiqcModule.hist_results
python
def hist_results(self):
    """process flash numeric histograms"""
    self.hist_data = OrderedDict()
    for histfile in self.find_log_files('flash/hist'):
        self.hist_data.update(self.parse_hist_files(histfile))

    # ignore sample names
    self.hist_data = self.ignore_samples(self.hist_data)

    try:
        if not self.hist_data:
            raise UserWarning
        log.info("Found %d histogram reports", len(self.hist_data))
        self.add_section(
            name='Frequency polygons of merged read lengths',
            anchor='flash-histogram',
            description='This plot is made from the numerical histograms output by FLASh.',
            plot=self.freqpoly_plot(self.hist_data))
    except UserWarning:
        pass
    except Exception as err:
        log.error(err)
        log.debug(traceback.format_exc())
    return len(self.hist_data)
[ "def", "hist_results", "(", "self", ")", ":", "self", ".", "hist_data", "=", "OrderedDict", "(", ")", "for", "histfile", "in", "self", ".", "find_log_files", "(", "'flash/hist'", ")", ":", "self", ".", "hist_data", ".", "update", "(", "self", ".", "parse...
process flash numeric histograms
[ "process", "flash", "numeric", "histograms" ]
2037d6322b2554146a74efbf869156ad20d4c4ec
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/flash/flash.py#L233-L258
224,619
PyPSA/PyPSA
examples/scigrid-de/add_load_gen_trafos_to_scigrid.py
generate_dummy_graph
python
def generate_dummy_graph(network):
    """Generate a dummy graph to feed to the FIAS libraries.

    It adds the "pos" attribute and removes the 380 kV duplicate buses
    when the buses have been split, so that all load and generation is
    attached to the 220kV bus."""

    graph = pypsa.descriptors.OrderedGraph()
    graph.add_nodes_from([bus for bus in network.buses.index if bus not in buses_to_split])

    # add positions to graph for voronoi cell computation
    for node in graph.nodes():
        graph.node[node]["pos"] = np.array(network.buses.loc[node, ["x", "y"]], dtype=float)

    return graph
[ "def", "generate_dummy_graph", "(", "network", ")", ":", "graph", "=", "pypsa", ".", "descriptors", ".", "OrderedGraph", "(", ")", "graph", ".", "add_nodes_from", "(", "[", "bus", "for", "bus", "in", "network", ".", "buses", ".", "index", "if", "bus", "n...
Generate a dummy graph to feed to the FIAS libraries. It adds the "pos" attribute and removes the 380 kV duplicate buses when the buses have been split, so that all load and generation is attached to the 220kV bus.
[ "Generate", "a", "dummy", "graph", "to", "feed", "to", "the", "FIAS", "libraries", ".", "It", "adds", "the", "pos", "attribute", "and", "removes", "the", "380", "kV", "duplicate", "buses", "when", "the", "buses", "have", "been", "split", "so", "that", "a...
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/examples/scigrid-de/add_load_gen_trafos_to_scigrid.py#L392-L406
224,620
PyPSA/PyPSA
examples/scigrid-de/add_load_gen_trafos_to_scigrid.py
voronoi_partition
python
def voronoi_partition(G, outline):
    """
    For 2D-embedded graph `G`, within the boundary given by the shapely polygon
    `outline`, returns `G` with the Voronoi cell region as an additional node
    attribute.
    """
    # following line from vresutils.graph caused a bug
    # G = polygon_subgraph(G, outline, copy=False)
    points = list(vresutils.graph.get_node_attributes(G, 'pos').values())
    regions = vresutils.graph.voronoi_partition_pts(points, outline, no_multipolygons=True)
    nx.set_node_attributes(G, 'region', dict(zip(G.nodes(), regions)))
    return G
[ "def", "voronoi_partition", "(", "G", ",", "outline", ")", ":", "#following line from vresutils.graph caused a bug", "#G = polygon_subgraph(G, outline, copy=False)", "points", "=", "list", "(", "vresutils", ".", "graph", ".", "get_node_attributes", "(", "G", ",", "'pos'",...
For 2D-embedded graph `G`, within the boundary given by the shapely polygon `outline`, returns `G` with the Voronoi cell region as an additional node attribute.
[ "For", "2D", "-", "embedded", "graph", "G", "within", "the", "boundary", "given", "by", "the", "shapely", "polygon", "outline", "returns", "G", "with", "the", "Voronoi", "cell", "region", "as", "an", "additional", "node", "attribute", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/examples/scigrid-de/add_load_gen_trafos_to_scigrid.py#L412-L424
224,621
PyPSA/PyPSA
pypsa/geo.py
area_from_lon_lat_poly
python
def area_from_lon_lat_poly(geometry):
    """
    Compute the area in km^2 of a shapely geometry, whose points are in
    longitude and latitude.

    Parameters
    ----------
    geometry: shapely geometry
        Points must be in longitude and latitude.

    Returns
    -------
    area: float
        Area in km^2.
    """
    import pyproj
    from shapely.ops import transform
    from functools import partial

    project = partial(pyproj.transform,
                      pyproj.Proj(init='epsg:4326'),  # Source: Lon-Lat
                      pyproj.Proj(proj='aea'))  # Target: Albers Equal Area Conical https://en.wikipedia.org/wiki/Albers_projection

    new_geometry = transform(project, geometry)

    # default area is in m^2
    return new_geometry.area / 1e6
[ "def", "area_from_lon_lat_poly", "(", "geometry", ")", ":", "import", "pyproj", "from", "shapely", ".", "ops", "import", "transform", "from", "functools", "import", "partial", "project", "=", "partial", "(", "pyproj", ".", "transform", ",", "pyproj", ".", "Pro...
Compute the area in km^2 of a shapely geometry, whose points are in longitude and latitude. Parameters ---------- geometry: shapely geometry Points must be in longitude and latitude. Returns ------- area: float Area in km^2.
[ "Compute", "the", "area", "in", "km^2", "of", "a", "shapely", "geometry", "whose", "points", "are", "in", "longitude", "and", "latitude", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/geo.py#L90-L119
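A hedged usage sketch: a 1°x1° lon/lat cell at around 50°N should come out at very roughly 7,900 km^2; the exact figure depends on the projection parameters.

from shapely.geometry import Polygon

# 1 degree x 1 degree box in lon/lat, roughly over central Germany
cell = Polygon([(10, 50), (11, 50), (11, 51), (10, 51)])
print(area_from_lon_lat_poly(cell))  # on the order of 7.9e3 km^2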
224,622
PyPSA/PyPSA
pypsa/opf.py
define_sub_network_cycle_constraints
python
def define_sub_network_cycle_constraints(subnetwork, snapshots, passive_branch_p, attribute):
    """ Constructs cycle_constraints for a particular subnetwork """

    sub_network_cycle_constraints = {}
    sub_network_cycle_index = []

    matrix = subnetwork.C.tocsc()
    branches = subnetwork.branches()

    for col_j in range(matrix.shape[1]):
        cycle_is = matrix.getcol(col_j).nonzero()[0]

        if len(cycle_is) == 0:
            continue

        sub_network_cycle_index.append((subnetwork.name, col_j))

        branch_idx_attributes = []

        for cycle_i in cycle_is:
            branch_idx = branches.index[cycle_i]
            attribute_value = 1e5 * branches.at[branch_idx, attribute] * subnetwork.C[cycle_i, col_j]
            branch_idx_attributes.append((branch_idx, attribute_value))

        for snapshot in snapshots:
            expression_list = [(attribute_value,
                                passive_branch_p[branch_idx[0], branch_idx[1], snapshot])
                               for (branch_idx, attribute_value) in branch_idx_attributes]

            lhs = LExpression(expression_list)
            sub_network_cycle_constraints[subnetwork.name, col_j, snapshot] = \
                LConstraint(lhs, "==", LExpression())

    return (sub_network_cycle_index, sub_network_cycle_constraints)
[ "def", "define_sub_network_cycle_constraints", "(", "subnetwork", ",", "snapshots", ",", "passive_branch_p", ",", "attribute", ")", ":", "sub_network_cycle_constraints", "=", "{", "}", "sub_network_cycle_index", "=", "[", "]", "matrix", "=", "subnetwork", ".", "C", ...
Constructs cycle_constraints for a particular subnetwork
[ "Constructs", "cycle_constraints", "for", "a", "particular", "subnetwork" ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/opf.py#L732-L764
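For reference, the constraint being built here is Kirchhoff's voltage law expressed through the cycle basis matrix C: for every independent cycle c and snapshot t, the impedance-weighted flows around the cycle must sum to zero. In the notation of the code, with x_l the branch attribute (reactance or resistance) and p_{l,t} the passive branch flow,

    \sum_{l} C_{lc} \, x_l \, p_{l,t} = 0 \qquad \forall \, c, t

The 1e5 factor is a pure rescaling of the impedances to improve LP numerics; since the right-hand side is zero, it does not change the constraint.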
224,623
PyPSA/PyPSA
pypsa/opf.py
define_passive_branch_flows_with_kirchhoff
python
def define_passive_branch_flows_with_kirchhoff(network, snapshots, skip_vars=False):
    """ define passive branch flows with the Kirchhoff method """

    for sub_network in network.sub_networks.obj:
        find_tree(sub_network)
        find_cycles(sub_network)

        # following is necessary to calculate angles post-facto
        find_bus_controls(sub_network)
        if len(sub_network.branches_i()) > 0:
            calculate_B_H(sub_network)

    passive_branches = network.passive_branches()

    if not skip_vars:
        network.model.passive_branch_p = Var(list(passive_branches.index), snapshots)

    cycle_index = []
    cycle_constraints = {}

    for subnetwork in network.sub_networks.obj:
        branches = subnetwork.branches()
        attribute = "r_pu_eff" if network.sub_networks.at[subnetwork.name, "carrier"] == "DC" else "x_pu_eff"

        sub_network_cycle_index, sub_network_cycle_constraints = define_sub_network_cycle_constraints(
            subnetwork, snapshots, network.model.passive_branch_p, attribute)

        cycle_index.extend(sub_network_cycle_index)
        cycle_constraints.update(sub_network_cycle_constraints)

    l_constraint(network.model, "cycle_constraints", cycle_constraints,
                 cycle_index, snapshots)
[ "def", "define_passive_branch_flows_with_kirchhoff", "(", "network", ",", "snapshots", ",", "skip_vars", "=", "False", ")", ":", "for", "sub_network", "in", "network", ".", "sub_networks", ".", "obj", ":", "find_tree", "(", "sub_network", ")", "find_cycles", "(", ...
define passive branch flows with the Kirchhoff method
[ "define", "passive", "branch", "flows", "with", "the", "kirchoff", "method" ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/opf.py#L832-L865
224,624
PyPSA/PyPSA
pypsa/opf.py
network_lopf_build_model
python
def network_lopf_build_model(network, snapshots=None, skip_pre=False,
                             formulation="angles", ptdf_tolerance=0.):
    """
    Build pyomo model for linear optimal power flow for a group of snapshots.

    Parameters
    ----------
    snapshots : list or index slice
        A list of snapshots to optimise, must be a subset of
        network.snapshots, defaults to network.snapshots
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.
    formulation : string
        Formulation of the linear power flow equations to use; must be one of
        ["angles", "cycles", "kirchhoff", "ptdf"]
    ptdf_tolerance : float
        Value below which PTDF entries are ignored

    Returns
    -------
    network.model
    """
    if not skip_pre:
        network.determine_network_topology()
        calculate_dependent_values(network)
        for sub_network in network.sub_networks.obj:
            find_slack_bus(sub_network)
        logger.info("Performed preliminary steps")

    snapshots = _as_snapshots(network, snapshots)

    logger.info("Building pyomo model using `%s` formulation", formulation)
    network.model = ConcreteModel("Linear Optimal Power Flow")

    define_generator_variables_constraints(network, snapshots)
    define_storage_variables_constraints(network, snapshots)
    define_store_variables_constraints(network, snapshots)
    define_branch_extension_variables(network, snapshots)
    define_link_flows(network, snapshots)
    define_nodal_balances(network, snapshots)
    define_passive_branch_flows(network, snapshots, formulation, ptdf_tolerance)
    define_passive_branch_constraints(network, snapshots)

    if formulation in ["angles", "kirchhoff"]:
        define_nodal_balance_constraints(network, snapshots)
    elif formulation in ["ptdf", "cycles"]:
        define_sub_network_balance_constraints(network, snapshots)

    define_global_constraints(network, snapshots)
    define_linear_objective(network, snapshots)

    # tidy up auxiliary expressions
    del network._p_balance

    # force solver to also give us the dual prices
    network.model.dual = Suffix(direction=Suffix.IMPORT)

    return network.model
[ "def", "network_lopf_build_model", "(", "network", ",", "snapshots", "=", "None", ",", "skip_pre", "=", "False", ",", "formulation", "=", "\"angles\"", ",", "ptdf_tolerance", "=", "0.", ")", ":", "if", "not", "skip_pre", ":", "network", ".", "determine_network...
Build pyomo model for linear optimal power flow for a group of snapshots. Parameters ---------- snapshots : list or index slice A list of snapshots to optimise, must be a subset of network.snapshots, defaults to network.snapshots skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. formulation : string Formulation of the linear power flow equations to use; must be one of ["angles","cycles","kirchhoff","ptdf"] ptdf_tolerance : float Value below which PTDF entries are ignored Returns ------- network.model
[ "Build", "pyomo", "model", "for", "linear", "optimal", "power", "flow", "for", "a", "group", "of", "snapshots", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/opf.py#L1329-L1398
224,625
PyPSA/PyPSA
pypsa/opf.py
network_lopf_prepare_solver
python
def network_lopf_prepare_solver(network, solver_name="glpk", solver_io=None):
    """
    Prepare solver for linear optimal power flow.

    Parameters
    ----------
    solver_name : string
        Must be a solver name that pyomo recognises and that is
        installed, e.g. "glpk", "gurobi"
    solver_io : string, default None
        Solver Input-Output option, e.g. "python" to use "gurobipy" for
        solver_name="gurobi"

    Returns
    -------
    None
    """
    network.opt = SolverFactory(solver_name, solver_io=solver_io)

    patch_optsolver_record_memusage_before_solving(network.opt, network)

    if isinstance(network.opt, PersistentSolver):
        network.opt.set_instance(network.model)

    return network.opt
[ "def", "network_lopf_prepare_solver", "(", "network", ",", "solver_name", "=", "\"glpk\"", ",", "solver_io", "=", "None", ")", ":", "network", ".", "opt", "=", "SolverFactory", "(", "solver_name", ",", "solver_io", "=", "solver_io", ")", "patch_optsolver_record_me...
Prepare solver for linear optimal power flow. Parameters ---------- solver_name : string Must be a solver name that pyomo recognises and that is installed, e.g. "glpk", "gurobi" solver_io : string, default None Solver Input-Output option, e.g. "python" to use "gurobipy" for solver_name="gurobi" Returns ------- None
[ "Prepare", "solver", "for", "linear", "optimal", "power", "flow", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/opf.py#L1400-L1425
224,626
PyPSA/PyPSA
pypsa/opf.py
network_lopf_solve
python
def network_lopf_solve(network, snapshots=None, formulation="angles",
                       solver_options={}, solver_logfile=None, keep_files=False,
                       free_memory={'pyomo'}, extra_postprocessing=None):
    """
    Solve linear optimal power flow for a group of snapshots and extract results.

    Parameters
    ----------
    snapshots : list or index slice
        A list of snapshots to optimise, must be a subset of
        network.snapshots, defaults to network.snapshots
    formulation : string
        Formulation of the linear power flow equations to use; must be one of
        ["angles", "cycles", "kirchhoff", "ptdf"]; must match the formulation
        used for building the model.
    solver_options : dictionary
        A dictionary with additional options that get passed to the solver.
        (e.g. {'threads': 2} tells gurobi to use only 2 cpus)
    solver_logfile : None|string
        If not None, sets the logfile option of the solver.
    keep_files : bool, default False
        Keep the files that pyomo constructs from OPF problem construction,
        e.g. .lp file - useful for debugging
    free_memory : set, default {'pyomo'}
        Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series
        data away while the solver runs (as a pickle to disk) and/or free
        `pyomo` data after the solution has been extracted.
    extra_postprocessing : callable function
        This function must take three arguments
        `extra_postprocessing(network, snapshots, duals)` and is called after
        the model has solved and the results are extracted. It allows the user
        to extract further information about the solution, such as additional
        shadow prices.

    Returns
    -------
    None
    """
    snapshots = _as_snapshots(network, snapshots)

    logger.info("Solving model using %s", network.opt.name)

    if isinstance(network.opt, PersistentSolver):
        args = []
    else:
        args = [network.model]

    if isinstance(free_memory, string_types):
        free_memory = {free_memory}

    if 'pypsa' in free_memory:
        with empty_network(network):
            network.results = network.opt.solve(*args, suffixes=["dual"],
                                                keepfiles=keep_files,
                                                logfile=solver_logfile,
                                                options=solver_options)
    else:
        network.results = network.opt.solve(*args, suffixes=["dual"],
                                            keepfiles=keep_files,
                                            logfile=solver_logfile,
                                            options=solver_options)

    if logger.isEnabledFor(logging.INFO):
        network.results.write()

    status = network.results["Solver"][0]["Status"].key
    termination_condition = network.results["Solver"][0]["Termination condition"].key

    if status == "ok" and termination_condition == "optimal":
        logger.info("Optimization successful")
        extract_optimisation_results(network, snapshots, formulation,
                                     free_pyomo='pyomo' in free_memory,
                                     extra_postprocessing=extra_postprocessing)
    elif status == "warning" and termination_condition == "other":
        logger.warning("WARNING! Optimization might be sub-optimal. Writing output anyway")
        extract_optimisation_results(network, snapshots, formulation,
                                     free_pyomo='pyomo' in free_memory,
                                     extra_postprocessing=extra_postprocessing)
    else:
        logger.error("Optimisation failed with status %s and termination condition %s"
                     % (status, termination_condition))

    return status, termination_condition
[ "def", "network_lopf_solve", "(", "network", ",", "snapshots", "=", "None", ",", "formulation", "=", "\"angles\"", ",", "solver_options", "=", "{", "}", ",", "solver_logfile", "=", "None", ",", "keep_files", "=", "False", ",", "free_memory", "=", "{", "'pyom...
Solve linear optimal power flow for a group of snapshots and extract results. Parameters ---------- snapshots : list or index slice A list of snapshots to optimise, must be a subset of network.snapshots, defaults to network.snapshots formulation : string Formulation of the linear power flow equations to use; must be one of ["angles","cycles","kirchhoff","ptdf"]; must match formulation used for building the model. solver_options : dictionary A dictionary with additional options that get passed to the solver. (e.g. {'threads':2} tells gurobi to use only 2 cpus) solver_logfile : None|string If not None, sets the logfile option of the solver. keep_files : bool, default False Keep the files that pyomo constructs from OPF problem construction, e.g. .lp file - useful for debugging free_memory : set, default {'pyomo'} Any subset of {'pypsa', 'pyomo'}. Allows stashing `pypsa` time-series data away while the solver runs (as a pickle to disk) and/or freeing `pyomo` data after the solution has been extracted. extra_postprocessing : callable function This function must take three arguments `extra_postprocessing(network,snapshots,duals)` and is called after the model has solved and the results are extracted. It allows the user to extract further information about the solution, such as additional shadow prices. Returns ------- status : string Solver status, e.g. "ok". termination_condition : string Solver termination condition, e.g. "optimal".
[ "Solve", "linear", "optimal", "power", "flow", "for", "a", "group", "of", "snapshots", "and", "extract", "results", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/opf.py#L1428-L1503
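The three opf functions in the records above and below (build, prepare, solve) form a pipeline that can also be driven by hand. A minimal sketch, assuming a hypothetical saved network file "my-network.nc" and that glpk is installed; "kirchhoff" is an arbitrary formulation choice and must simply match between build and solve:

import pypsa
from pypsa.opf import (network_lopf_build_model, network_lopf_prepare_solver,
                       network_lopf_solve)

network = pypsa.Network("my-network.nc")  # hypothetical saved network

# stage 1: construct the pyomo model for all snapshots
network_lopf_build_model(network, network.snapshots, formulation="kirchhoff")

# stage 2: attach a solver that pyomo recognises
network_lopf_prepare_solver(network, solver_name="glpk")

# stage 3: solve and extract results; formulation must match stage 1
status, condition = network_lopf_solve(network, network.snapshots,
                                       formulation="kirchhoff")
print(status, condition)  # e.g. "ok optimal"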
224,627
PyPSA/PyPSA
pypsa/opf.py
network_lopf
def network_lopf(network, snapshots=None, solver_name="glpk", solver_io=None, skip_pre=False, extra_functionality=None, solver_logfile=None, solver_options={}, keep_files=False, formulation="angles", ptdf_tolerance=0., free_memory={},extra_postprocessing=None): """ Linear optimal power flow for a group of snapshots. Parameters ---------- snapshots : list or index slice A list of snapshots to optimise, must be a subset of network.snapshots, defaults to network.snapshots solver_name : string Must be a solver name that pyomo recognises and that is installed, e.g. "glpk", "gurobi" solver_io : string, default None Solver Input-Output option, e.g. "python" to use "gurobipy" for solver_name="gurobi" skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. extra_functionality : callable function This function must take two arguments `extra_functionality(network,snapshots)` and is called after the model building is complete, but before it is sent to the solver. It allows the user to add/change constraints and add/change the objective function. solver_logfile : None|string If not None, sets the logfile option of the solver. solver_options : dictionary A dictionary with additional options that get passed to the solver. (e.g. {'threads':2} tells gurobi to use only 2 cpus) keep_files : bool, default False Keep the files that pyomo constructs from OPF problem construction, e.g. .lp file - useful for debugging formulation : string Formulation of the linear power flow equations to use; must be one of ["angles","cycles","kirchhoff","ptdf"] ptdf_tolerance : float Value below which PTDF entries are ignored free_memory : set, default {'pyomo'} Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series data away while the solver runs (as a pickle to disk) and/or free `pyomo` data after the solution has been extracted. extra_postprocessing : callable function This function must take three arguments `extra_postprocessing(network,snapshots,duals)` and is called after the model has solved and the results are extracted. It allows the user to extract further information about the solution, such as additional shadow prices. Returns ------- None """ snapshots = _as_snapshots(network, snapshots) network_lopf_build_model(network, snapshots, skip_pre=skip_pre, formulation=formulation, ptdf_tolerance=ptdf_tolerance) if extra_functionality is not None: extra_functionality(network,snapshots) network_lopf_prepare_solver(network, solver_name=solver_name, solver_io=solver_io) return network_lopf_solve(network, snapshots, formulation=formulation, solver_logfile=solver_logfile, solver_options=solver_options, keep_files=keep_files, free_memory=free_memory, extra_postprocessing=extra_postprocessing)
python
def network_lopf(network, snapshots=None, solver_name="glpk", solver_io=None, skip_pre=False, extra_functionality=None, solver_logfile=None, solver_options={}, keep_files=False, formulation="angles", ptdf_tolerance=0., free_memory={},extra_postprocessing=None): snapshots = _as_snapshots(network, snapshots) network_lopf_build_model(network, snapshots, skip_pre=skip_pre, formulation=formulation, ptdf_tolerance=ptdf_tolerance) if extra_functionality is not None: extra_functionality(network,snapshots) network_lopf_prepare_solver(network, solver_name=solver_name, solver_io=solver_io) return network_lopf_solve(network, snapshots, formulation=formulation, solver_logfile=solver_logfile, solver_options=solver_options, keep_files=keep_files, free_memory=free_memory, extra_postprocessing=extra_postprocessing)
[ "def", "network_lopf", "(", "network", ",", "snapshots", "=", "None", ",", "solver_name", "=", "\"glpk\"", ",", "solver_io", "=", "None", ",", "skip_pre", "=", "False", ",", "extra_functionality", "=", "None", ",", "solver_logfile", "=", "None", ",", "solver...
Linear optimal power flow for a group of snapshots. Parameters ---------- snapshots : list or index slice A list of snapshots to optimise, must be a subset of network.snapshots, defaults to network.snapshots solver_name : string Must be a solver name that pyomo recognises and that is installed, e.g. "glpk", "gurobi" solver_io : string, default None Solver Input-Output option, e.g. "python" to use "gurobipy" for solver_name="gurobi" skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. extra_functionality : callable function This function must take two arguments `extra_functionality(network,snapshots)` and is called after the model building is complete, but before it is sent to the solver. It allows the user to add/change constraints and add/change the objective function. solver_logfile : None|string If not None, sets the logfile option of the solver. solver_options : dictionary A dictionary with additional options that get passed to the solver. (e.g. {'threads':2} tells gurobi to use only 2 cpus) keep_files : bool, default False Keep the files that pyomo constructs from OPF problem construction, e.g. .lp file - useful for debugging formulation : string Formulation of the linear power flow equations to use; must be one of ["angles","cycles","kirchhoff","ptdf"] ptdf_tolerance : float Value below which PTDF entries are ignored free_memory : set, default {'pyomo'} Any subset of {'pypsa', 'pyomo'}. Allows stashing `pypsa` time-series data away while the solver runs (as a pickle to disk) and/or freeing `pyomo` data after the solution has been extracted. extra_postprocessing : callable function This function must take three arguments `extra_postprocessing(network,snapshots,duals)` and is called after the model has solved and the results are extracted. It allows the user to extract further information about the solution, such as additional shadow prices. Returns ------- status : string Solver status, e.g. "ok". termination_condition : string Solver termination condition, e.g. "optimal".
[ "Linear", "optimal", "power", "flow", "for", "a", "group", "of", "snapshots", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/opf.py#L1505-L1574
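A hedged usage sketch of this one-shot wrapper via the bound Network.lopf method, on a made-up two-bus system (all component names and cost numbers are illustrative, and glpk is assumed to be installed):

import pypsa

network = pypsa.Network()
network.set_snapshots(range(4))

network.add("Bus", "bus0")
network.add("Bus", "bus1")
network.add("Line", "line01", bus0="bus0", bus1="bus1", x=0.1, s_nom=100.)
network.add("Generator", "gen0", bus="bus0", p_nom=100., marginal_cost=50.)
network.add("Load", "load1", bus="bus1", p_set=30.)

# one call runs build -> prepare -> solve for all snapshots
status, condition = network.lopf(network.snapshots, solver_name="glpk")

print(network.generators_t.p)          # optimised dispatch per snapshot
print(network.buses_t.marginal_price)  # shadow prices from the duals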
224,628
PyPSA/PyPSA
examples/sector-coupling/replace-generator-storage-units-with-store.py
replace_gen
def replace_gen(network,gen_to_replace): """Replace the generator gen_to_replace with a bus for the energy carrier, a link for the conversion from the energy carrier to electricity and a store to keep track of the depletion of the energy carrier and its CO2 emissions.""" gen = network.generators.loc[gen_to_replace] bus_name = "{} {}".format(gen["bus"], gen["carrier"]) link_name = "{} converter {} to AC".format(gen_to_replace,gen["carrier"]) store_name = "{} store {}".format(gen_to_replace,gen["carrier"]) network.add("Bus", bus_name, carrier=gen["carrier"]) network.add("Link", link_name, bus0=bus_name, bus1=gen["bus"], capital_cost=gen["capital_cost"]*gen["efficiency"], p_nom = gen["p_nom"]/gen["efficiency"], p_nom_extendable=gen["p_nom_extendable"], p_nom_max = gen["p_nom_max"]/gen["efficiency"], p_nom_min = gen["p_nom_min"]/gen["efficiency"], p_max_pu = network.generators_t.p_max_pu.loc[:,gen_to_replace] if gen_to_replace in network.generators_t.p_max_pu.columns else gen["p_max_pu"], p_min_pu = network.generators_t.p_min_pu.loc[:,gen_to_replace] if gen_to_replace in network.generators_t.p_min_pu.columns else gen["p_min_pu"], marginal_cost=gen["marginal_cost"]*gen["efficiency"], efficiency=gen["efficiency"]) network.add("Store", store_name, bus=bus_name, e_nom_min=-float("inf"), e_nom_max=0, e_nom_extendable=True, e_min_pu=1., e_max_pu=0.) network.remove("Generator",gen_to_replace) return bus_name, link_name, store_name
python
def replace_gen(network,gen_to_replace): gen = network.generators.loc[gen_to_replace] bus_name = "{} {}".format(gen["bus"], gen["carrier"]) link_name = "{} converter {} to AC".format(gen_to_replace,gen["carrier"]) store_name = "{} store {}".format(gen_to_replace,gen["carrier"]) network.add("Bus", bus_name, carrier=gen["carrier"]) network.add("Link", link_name, bus0=bus_name, bus1=gen["bus"], capital_cost=gen["capital_cost"]*gen["efficiency"], p_nom = gen["p_nom"]/gen["efficiency"], p_nom_extendable=gen["p_nom_extendable"], p_nom_max = gen["p_nom_max"]/gen["efficiency"], p_nom_min = gen["p_nom_min"]/gen["efficiency"], p_max_pu = network.generators_t.p_max_pu.loc[:,gen_to_replace] if gen_to_replace in network.generators_t.p_max_pu.columns else gen["p_max_pu"], p_min_pu = network.generators_t.p_min_pu.loc[:,gen_to_replace] if gen_to_replace in network.generators_t.p_min_pu.columns else gen["p_min_pu"], marginal_cost=gen["marginal_cost"]*gen["efficiency"], efficiency=gen["efficiency"]) network.add("Store", store_name, bus=bus_name, e_nom_min=-float("inf"), e_nom_max=0, e_nom_extendable=True, e_min_pu=1., e_max_pu=0.) network.remove("Generator",gen_to_replace) return bus_name, link_name, store_name
[ "def", "replace_gen", "(", "network", ",", "gen_to_replace", ")", ":", "gen", "=", "network", ".", "generators", ".", "loc", "[", "gen_to_replace", "]", "bus_name", "=", "\"{} {}\"", ".", "format", "(", "gen", "[", "\"bus\"", "]", ",", "gen", "[", "\"car...
Replace the generator gen_to_replace with a bus for the energy carrier, a link for the conversion from the energy carrier to electricity and a store to keep track of the depletion of the energy carrier and its CO2 emissions.
[ "Replace", "the", "generator", "gen_to_replace", "with", "a", "bus", "for", "the", "energy", "carrier", "a", "link", "for", "the", "conversion", "from", "the", "energy", "carrier", "to", "electricity", "and", "a", "store", "to", "keep", "track", "of", "the",...
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/examples/sector-coupling/replace-generator-storage-units-with-store.py#L12-L54
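A short usage sketch for the example script above; the generator name "gas1" is hypothetical and the network is assumed to already contain it with carrier and efficiency set:

# hypothetical generator name "gas1"; the function returns the names of
# the new components so they can be customised afterwards
bus_name, link_name, store_name = replace_gen(network, "gas1")

# the link carries the fuel-to-electricity conversion (its costs and
# limits are rescaled by the efficiency); the store starts at zero and
# can only go negative, so its level tracks cumulative fuel depletion
network.stores.loc[store_name, "marginal_cost"] = 10.  # e.g. a fuel price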
224,629
PyPSA/PyPSA
pypsa/descriptors.py
get_switchable_as_dense
def get_switchable_as_dense(network, component, attr, snapshots=None, inds=None): """ Return a Dataframe for a time-varying component attribute with values for all non-time-varying components filled in with the default values for the attribute. Parameters ---------- network : pypsa.Network component : string Component object name, e.g. 'Generator' or 'Link' attr : string Attribute name snapshots : pandas.Index Restrict to these snapshots rather than network.snapshots. inds : pandas.Index Restrict to these components rather than network.components.index Returns ------- pandas.DataFrame Examples -------- >>> get_switchable_as_dense(network, 'Generator', 'p_max_pu') """ df = network.df(component) pnl = network.pnl(component) index = df.index varying_i = pnl[attr].columns fixed_i = df.index.difference(varying_i) if inds is not None: index = index.intersection(inds) varying_i = varying_i.intersection(inds) fixed_i = fixed_i.intersection(inds) if snapshots is None: snapshots = network.snapshots return (pd.concat([ pd.DataFrame(np.repeat([df.loc[fixed_i, attr].values], len(snapshots), axis=0), index=snapshots, columns=fixed_i), pnl[attr].loc[snapshots, varying_i] ], axis=1, sort=False).reindex(columns=index))
python
def get_switchable_as_dense(network, component, attr, snapshots=None, inds=None): df = network.df(component) pnl = network.pnl(component) index = df.index varying_i = pnl[attr].columns fixed_i = df.index.difference(varying_i) if inds is not None: index = index.intersection(inds) varying_i = varying_i.intersection(inds) fixed_i = fixed_i.intersection(inds) if snapshots is None: snapshots = network.snapshots return (pd.concat([ pd.DataFrame(np.repeat([df.loc[fixed_i, attr].values], len(snapshots), axis=0), index=snapshots, columns=fixed_i), pnl[attr].loc[snapshots, varying_i] ], axis=1, sort=False).reindex(columns=index))
[ "def", "get_switchable_as_dense", "(", "network", ",", "component", ",", "attr", ",", "snapshots", "=", "None", ",", "inds", "=", "None", ")", ":", "df", "=", "network", ".", "df", "(", "component", ")", "pnl", "=", "network", ".", "pnl", "(", "compone...
Return a Dataframe for a time-varying component attribute with values for all non-time-varying components filled in with the default values for the attribute. Parameters ---------- network : pypsa.Network component : string Component object name, e.g. 'Generator' or 'Link' attr : string Attribute name snapshots : pandas.Index Restrict to these snapshots rather than network.snapshots. inds : pandas.Index Restrict to these components rather than network.components.index Returns ------- pandas.DataFrame Examples -------- >>> get_switchable_as_dense(network, 'Generator', 'p_max_pu')
[ "Return", "a", "Dataframe", "for", "a", "time", "-", "varying", "component", "attribute", "with", "values", "for", "all", "non", "-", "time", "-", "varying", "components", "filled", "in", "with", "the", "default", "values", "for", "the", "attribute", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/descriptors.py#L152-L198
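A small self-contained sketch of the static/time-varying merge this function performs; the component names and numbers are made up:

import pandas as pd
import pypsa
from pypsa.descriptors import get_switchable_as_dense

network = pypsa.Network()
network.set_snapshots(range(3))
network.add("Bus", "bus0")
# one generator with a p_max_pu series, one relying on the static default
network.add("Generator", "wind", bus="bus0", p_nom=10.,
            p_max_pu=pd.Series([0.3, 0.7, 0.5], index=network.snapshots))
network.add("Generator", "coal", bus="bus0", p_nom=10.)

df = get_switchable_as_dense(network, "Generator", "p_max_pu")
# snapshots x generators: "wind" keeps its series, while "coal" shows the
# static default (1.0) broadcast across all snapshots
print(df)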
224,630
PyPSA/PyPSA
pypsa/descriptors.py
get_switchable_as_iter
def get_switchable_as_iter(network, component, attr, snapshots, inds=None): """ Return an iterator over snapshots for a time-varying component attribute with values for all non-time-varying components filled in with the default values for the attribute. Parameters ---------- network : pypsa.Network component : string Component object name, e.g. 'Generator' or 'Link' attr : string Attribute name snapshots : pandas.Index Restrict to these snapshots rather than network.snapshots. inds : pandas.Index Restrict to these items rather than all of network.{generators,..}.index Returns ------- pandas.DataFrame Examples -------- >>> get_switchable_as_iter(network, 'Generator', 'p_max_pu', snapshots) """ df = network.df(component) pnl = network.pnl(component) index = df.index varying_i = pnl[attr].columns fixed_i = df.index.difference(varying_i) if inds is not None: inds = pd.Index(inds) index = inds.intersection(index) varying_i = inds.intersection(varying_i) fixed_i = inds.intersection(fixed_i) # Short-circuit only fixed if len(varying_i) == 0: return repeat(df.loc[fixed_i, attr], len(snapshots)) def is_same_indices(i1, i2): return len(i1) == len(i2) and (i1 == i2).all() if is_same_indices(fixed_i.append(varying_i), index): def reindex_maybe(s): return s else: def reindex_maybe(s): return s.reindex(index) return ( reindex_maybe(df.loc[fixed_i, attr].append(pnl[attr].loc[sn, varying_i])) for sn in snapshots )
python
def get_switchable_as_iter(network, component, attr, snapshots, inds=None): df = network.df(component) pnl = network.pnl(component) index = df.index varying_i = pnl[attr].columns fixed_i = df.index.difference(varying_i) if inds is not None: inds = pd.Index(inds) index = inds.intersection(index) varying_i = inds.intersection(varying_i) fixed_i = inds.intersection(fixed_i) # Short-circuit only fixed if len(varying_i) == 0: return repeat(df.loc[fixed_i, attr], len(snapshots)) def is_same_indices(i1, i2): return len(i1) == len(i2) and (i1 == i2).all() if is_same_indices(fixed_i.append(varying_i), index): def reindex_maybe(s): return s else: def reindex_maybe(s): return s.reindex(index) return ( reindex_maybe(df.loc[fixed_i, attr].append(pnl[attr].loc[sn, varying_i])) for sn in snapshots )
[ "def", "get_switchable_as_iter", "(", "network", ",", "component", ",", "attr", ",", "snapshots", ",", "inds", "=", "None", ")", ":", "df", "=", "network", ".", "df", "(", "component", ")", "pnl", "=", "network", ".", "pnl", "(", "component", ")", "ind...
Return an iterator over snapshots for a time-varying component attribute with values for all non-time-varying components filled in with the default values for the attribute. Parameters ---------- network : pypsa.Network component : string Component object name, e.g. 'Generator' or 'Link' attr : string Attribute name snapshots : pandas.Index Restrict to these snapshots rather than network.snapshots. inds : pandas.Index Restrict to these items rather than all of network.{generators,..}.index Returns ------- pandas.DataFrame Examples -------- >>> get_switchable_as_iter(network, 'Generator', 'p_max_pu', snapshots)
[ "Return", "an", "iterator", "over", "snapshots", "for", "a", "time", "-", "varying", "component", "attribute", "with", "values", "for", "all", "non", "-", "time", "-", "varying", "components", "filled", "in", "with", "the", "default", "values", "for", "the",...
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/descriptors.py#L200-L254
224,631
PyPSA/PyPSA
pypsa/descriptors.py
allocate_series_dataframes
def allocate_series_dataframes(network, series): """ Populate time-varying outputs with default values. Parameters ---------- network : pypsa.Network series : dict Dictionary of components and their attributes to populate (see example) Returns ------- None Examples -------- >>> allocate_series_dataframes(network, {'Generator': ['p'], 'Load': ['p']}) """ for component, attributes in iteritems(series): df = network.df(component) pnl = network.pnl(component) for attr in attributes: pnl[attr] = pnl[attr].reindex(columns=df.index, fill_value=network.components[component]["attrs"].at[attr,"default"])
python
def allocate_series_dataframes(network, series): for component, attributes in iteritems(series): df = network.df(component) pnl = network.pnl(component) for attr in attributes: pnl[attr] = pnl[attr].reindex(columns=df.index, fill_value=network.components[component]["attrs"].at[attr,"default"])
[ "def", "allocate_series_dataframes", "(", "network", ",", "series", ")", ":", "for", "component", ",", "attributes", "in", "iteritems", "(", "series", ")", ":", "df", "=", "network", ".", "df", "(", "component", ")", "pnl", "=", "network", ".", "pnl", "(...
Populate time-varying outputs with default values. Parameters ---------- network : pypsa.Network series : dict Dictionary of components and their attributes to populate (see example) Returns ------- None Examples -------- >>> allocate_series_dataframes(network, {'Generator': ['p'], 'Load': ['p']})
[ "Populate", "time", "-", "varying", "outputs", "with", "default", "values", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/descriptors.py#L257-L285
224,632
PyPSA/PyPSA
pypsa/pf.py
network_pf
def network_pf(network, snapshots=None, skip_pre=False, x_tol=1e-6, use_seed=False): """ Full non-linear power flow for generic network. Parameters ---------- snapshots : list-like|single snapshot A subset or an elements of network.snapshots on which to run the power flow, defaults to network.snapshots skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. x_tol: float Tolerance for Newton-Raphson power flow. use_seed : bool, default False Use a seed for the initial guess for the Newton-Raphson algorithm. Returns ------- Dictionary with keys 'n_iter', 'converged', 'error' and dataframe values indicating number of iterations, convergence status, and iteration error for each snapshot (rows) and sub_network (columns) """ return _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=False, x_tol=x_tol, use_seed=use_seed)
python
def network_pf(network, snapshots=None, skip_pre=False, x_tol=1e-6, use_seed=False): return _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=False, x_tol=x_tol, use_seed=use_seed)
[ "def", "network_pf", "(", "network", ",", "snapshots", "=", "None", ",", "skip_pre", "=", "False", ",", "x_tol", "=", "1e-6", ",", "use_seed", "=", "False", ")", ":", "return", "_network_prepare_and_run_pf", "(", "network", ",", "snapshots", ",", "skip_pre",...
Full non-linear power flow for generic network. Parameters ---------- snapshots : list-like|single snapshot A subset or an element of network.snapshots on which to run the power flow, defaults to network.snapshots skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. x_tol: float Tolerance for Newton-Raphson power flow. use_seed : bool, default False Use a seed for the initial guess for the Newton-Raphson algorithm. Returns ------- Dictionary with keys 'n_iter', 'converged', 'error' and dataframe values indicating number of iterations, convergence status, and iteration error for each snapshot (rows) and sub_network (columns)
[ "Full", "non", "-", "linear", "power", "flow", "for", "generic", "network", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L129-L152
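A hedged sketch of running the non-linear power flow and reading the report described in the Returns section, assuming a network like the two-bus example sketched earlier (after an LOPF has fixed the dispatch, or with p_set given directly):

# run the Newton-Raphson power flow and inspect the report it returns
info = network.pf(network.snapshots)

print(info["converged"])  # snapshots x sub-networks, booleans
print(info["n_iter"])     # iteration counts per snapshot and sub-network
print(info["error"])      # final mismatch

print(network.buses_t.v_mag_pu)  # resulting voltage magnitudes (p.u.)
print(network.lines_t.p0)        # active power entering each line at bus0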
224,633
PyPSA/PyPSA
pypsa/pf.py
network_lpf
def network_lpf(network, snapshots=None, skip_pre=False): """ Linear power flow for generic network. Parameters ---------- snapshots : list-like|single snapshot A subset or an elements of network.snapshots on which to run the power flow, defaults to network.snapshots skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. Returns ------- None """ _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=True)
python
def network_lpf(network, snapshots=None, skip_pre=False): _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=True)
[ "def", "network_lpf", "(", "network", ",", "snapshots", "=", "None", ",", "skip_pre", "=", "False", ")", ":", "_network_prepare_and_run_pf", "(", "network", ",", "snapshots", ",", "skip_pre", ",", "linear", "=", "True", ")" ]
Linear power flow for generic network. Parameters ---------- snapshots : list-like|single snapshot A subset or an element of network.snapshots on which to run the power flow, defaults to network.snapshots skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. Returns ------- None
[ "Linear", "power", "flow", "for", "generic", "network", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L393-L411
224,634
PyPSA/PyPSA
pypsa/pf.py
apply_line_types
def apply_line_types(network): """Calculate line electrical parameters x, r, b, g from standard types. """ lines_with_types_b = network.lines.type != "" if lines_with_types_b.zsum() == 0: return missing_types = (pd.Index(network.lines.loc[lines_with_types_b, 'type'].unique()) .difference(network.line_types.index)) assert missing_types.empty, ("The type(s) {} do(es) not exist in network.line_types" .format(", ".join(missing_types))) # Get a copy of the lines data l = (network.lines.loc[lines_with_types_b, ["type", "length", "num_parallel"]] .join(network.line_types, on='type')) for attr in ["r","x"]: l[attr] = l[attr + "_per_length"] * l["length"] / l["num_parallel"] l["b"] = 2*np.pi*1e-9*l["f_nom"] * l["c_per_length"] * l["length"] * l["num_parallel"] # now set calculated values on live lines for attr in ["r", "x", "b"]: network.lines.loc[lines_with_types_b, attr] = l[attr]
python
def apply_line_types(network): lines_with_types_b = network.lines.type != "" if lines_with_types_b.zsum() == 0: return missing_types = (pd.Index(network.lines.loc[lines_with_types_b, 'type'].unique()) .difference(network.line_types.index)) assert missing_types.empty, ("The type(s) {} do(es) not exist in network.line_types" .format(", ".join(missing_types))) # Get a copy of the lines data l = (network.lines.loc[lines_with_types_b, ["type", "length", "num_parallel"]] .join(network.line_types, on='type')) for attr in ["r","x"]: l[attr] = l[attr + "_per_length"] * l["length"] / l["num_parallel"] l["b"] = 2*np.pi*1e-9*l["f_nom"] * l["c_per_length"] * l["length"] * l["num_parallel"] # now set calculated values on live lines for attr in ["r", "x", "b"]: network.lines.loc[lines_with_types_b, attr] = l[attr]
[ "def", "apply_line_types", "(", "network", ")", ":", "lines_with_types_b", "=", "network", ".", "lines", ".", "type", "!=", "\"\"", "if", "lines_with_types_b", ".", "zsum", "(", ")", "==", "0", ":", "return", "missing_types", "=", "(", "pd", ".", "Index", ...
Calculate line electrical parameters x, r, b, g from standard types.
[ "Calculate", "line", "electrical", "parameters", "x", "r", "b", "g", "from", "standard", "types", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L414-L439
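The per-length arithmetic in apply_line_types is easy to verify by hand. A standalone sketch with invented type parameters (not taken from any real standard type):

import numpy as np

# invented standard-type parameters
r_per_length = 0.06   # Ohm/km
x_per_length = 0.30   # Ohm/km
c_per_length = 12.0   # nF/km
f_nom = 50.0          # Hz

length = 100.0        # km
num_parallel = 2.0

# series impedance grows with length and divides over parallel circuits
r = r_per_length * length / num_parallel   # 3.0 Ohm
x = x_per_length * length / num_parallel   # 15.0 Ohm

# shunt susceptance b = 2*pi*f*C; the 1e-9 converts nF to F, and b
# grows with both length and the number of parallel circuits
b = 2 * np.pi * 1e-9 * f_nom * c_per_length * length * num_parallel
print(b)  # ~7.54e-4 Siemens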
224,635
PyPSA/PyPSA
pypsa/pf.py
apply_transformer_types
def apply_transformer_types(network): """Calculate transformer electrical parameters x, r, b, g from standard types. """ trafos_with_types_b = network.transformers.type != "" if trafos_with_types_b.zsum() == 0: return missing_types = (pd.Index(network.transformers.loc[trafos_with_types_b, 'type'].unique()) .difference(network.transformer_types.index)) assert missing_types.empty, ("The type(s) {} do(es) not exist in network.transformer_types" .format(", ".join(missing_types))) # Get a copy of the transformers data # (joining pulls in "phase_shift", "s_nom", "tap_side" from TransformerType) t = (network.transformers.loc[trafos_with_types_b, ["type", "tap_position", "num_parallel"]] .join(network.transformer_types, on='type')) t["r"] = t["vscr"] /100. t["x"] = np.sqrt((t["vsc"]/100.)**2 - t["r"]**2) #NB: b and g are per unit of s_nom t["g"] = t["pfe"]/(1000. * t["s_nom"]) #for some bizarre reason, some of the standard types in pandapower have i0^2 < g^2 t["b"] = - np.sqrt(((t["i0"]/100.)**2 - t["g"]**2).clip(lower=0)) for attr in ["r","x"]: t[attr] /= t["num_parallel"] for attr in ["b","g"]: t[attr] *= t["num_parallel"] #deal with tap positions t["tap_ratio"] = 1. + (t["tap_position"] - t["tap_neutral"]) * (t["tap_step"]/100.) # now set calculated values on live transformers for attr in ["r", "x", "g", "b", "phase_shift", "s_nom", "tap_side", "tap_ratio"]: network.transformers.loc[trafos_with_types_b, attr] = t[attr]
python
def apply_transformer_types(network): trafos_with_types_b = network.transformers.type != "" if trafos_with_types_b.zsum() == 0: return missing_types = (pd.Index(network.transformers.loc[trafos_with_types_b, 'type'].unique()) .difference(network.transformer_types.index)) assert missing_types.empty, ("The type(s) {} do(es) not exist in network.transformer_types" .format(", ".join(missing_types))) # Get a copy of the transformers data # (joining pulls in "phase_shift", "s_nom", "tap_side" from TransformerType) t = (network.transformers.loc[trafos_with_types_b, ["type", "tap_position", "num_parallel"]] .join(network.transformer_types, on='type')) t["r"] = t["vscr"] /100. t["x"] = np.sqrt((t["vsc"]/100.)**2 - t["r"]**2) #NB: b and g are per unit of s_nom t["g"] = t["pfe"]/(1000. * t["s_nom"]) #for some bizarre reason, some of the standard types in pandapower have i0^2 < g^2 t["b"] = - np.sqrt(((t["i0"]/100.)**2 - t["g"]**2).clip(lower=0)) for attr in ["r","x"]: t[attr] /= t["num_parallel"] for attr in ["b","g"]: t[attr] *= t["num_parallel"] #deal with tap positions t["tap_ratio"] = 1. + (t["tap_position"] - t["tap_neutral"]) * (t["tap_step"]/100.) # now set calculated values on live transformers for attr in ["r", "x", "g", "b", "phase_shift", "s_nom", "tap_side", "tap_ratio"]: network.transformers.loc[trafos_with_types_b, attr] = t[attr]
[ "def", "apply_transformer_types", "(", "network", ")", ":", "trafos_with_types_b", "=", "network", ".", "transformers", ".", "type", "!=", "\"\"", "if", "trafos_with_types_b", ".", "zsum", "(", ")", "==", "0", ":", "return", "missing_types", "=", "(", "pd", ...
Calculate transformer electrical parameters x, r, b, g from standard types.
[ "Calculate", "transformer", "electrical", "parameters", "x", "r", "b", "g", "from", "standard", "types", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L443-L484
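The short-circuit-test formulas above can likewise be checked on scalars. A sketch with invented type data (vsc and vscr in percent, pfe in kW, i0 in percent, impedances per unit of s_nom):

import numpy as np

# invented type data
vsc, vscr = 12.0, 0.5   # short-circuit voltage and its real part [%]
pfe, i0 = 50.0, 0.2     # iron losses [kW], open-loop current [%]
s_nom = 100.0           # MVA

r = vscr / 100.                      # 0.005 (per unit of s_nom)
x = np.sqrt((vsc / 100.)**2 - r**2)  # ~0.1199
g = pfe / (1000. * s_nom)            # 5e-4
# the clip guards against i0**2 < g**2, as noted in the code above
b = -np.sqrt(max((i0 / 100.)**2 - g**2, 0.))  # ~-1.94e-3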
224,636
PyPSA/PyPSA
pypsa/pf.py
apply_transformer_t_model
def apply_transformer_t_model(network): """Convert given T-model parameters to PI-model parameters using wye-delta transformation""" z_series = network.transformers.r_pu + 1j*network.transformers.x_pu y_shunt = network.transformers.g_pu + 1j*network.transformers.b_pu ts_b = (network.transformers.model == "t") & (y_shunt != 0.) if ts_b.zsum() == 0: return za,zb,zc = wye_to_delta(z_series.loc[ts_b]/2,z_series.loc[ts_b]/2,1/y_shunt.loc[ts_b]) network.transformers.loc[ts_b,"r_pu"] = zc.real network.transformers.loc[ts_b,"x_pu"] = zc.imag network.transformers.loc[ts_b,"g_pu"] = (2/za).real network.transformers.loc[ts_b,"b_pu"] = (2/za).imag
python
def apply_transformer_t_model(network): z_series = network.transformers.r_pu + 1j*network.transformers.x_pu y_shunt = network.transformers.g_pu + 1j*network.transformers.b_pu ts_b = (network.transformers.model == "t") & (y_shunt != 0.) if ts_b.zsum() == 0: return za,zb,zc = wye_to_delta(z_series.loc[ts_b]/2,z_series.loc[ts_b]/2,1/y_shunt.loc[ts_b]) network.transformers.loc[ts_b,"r_pu"] = zc.real network.transformers.loc[ts_b,"x_pu"] = zc.imag network.transformers.loc[ts_b,"g_pu"] = (2/za).real network.transformers.loc[ts_b,"b_pu"] = (2/za).imag
[ "def", "apply_transformer_t_model", "(", "network", ")", ":", "z_series", "=", "network", ".", "transformers", ".", "r_pu", "+", "1j", "*", "network", ".", "transformers", ".", "x_pu", "y_shunt", "=", "network", ".", "transformers", ".", "g_pu", "+", "1j", ...
Convert given T-model parameters to PI-model parameters using wye-delta transformation
[ "Convert", "given", "T", "-", "model", "parameters", "to", "PI", "-", "model", "parameters", "using", "wye", "-", "delta", "transformation" ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L495-L511
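apply_transformer_t_model leans on a wye_to_delta helper defined elsewhere in pf.py. A hedged, self-contained sketch of the underlying T-to-PI conversion using the standard wye-delta identities; the function name t_to_pi and its return convention are my own, not PyPSA's:

def t_to_pi(z_series, y_shunt):
    """Convert a T model (z/2 -- shunt y -- z/2) to PI parameters via the
    standard wye-delta identities, with S = z1*z2 + z2*z3 + z3*z1 and each
    delta branch equal to S over the opposite wye impedance."""
    z1 = z2 = z_series / 2
    z3 = 1 / y_shunt
    s = z1 * z2 + z2 * z3 + z3 * z1
    z_pi_series = s / z3   # branch between the two terminals
    z_pi_shunt = s / z1    # each terminal-to-ground branch (symmetric here)
    return z_pi_series, 2 / z_pi_shunt  # total shunt admittance, both legs

z_pi, y_pi = t_to_pi(0.01 + 0.10j, 0.002 - 0.005j)
r_pu, x_pu = z_pi.real, z_pi.imag
g_pu, b_pu = y_pi.real, y_pi.imag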
224,637
PyPSA/PyPSA
pypsa/pf.py
calculate_dependent_values
def calculate_dependent_values(network): """Calculate per unit impedances and append voltages to lines and shunt impedances.""" apply_line_types(network) apply_transformer_types(network) network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom) network.lines["x_pu"] = network.lines.x/(network.lines.v_nom**2) network.lines["r_pu"] = network.lines.r/(network.lines.v_nom**2) network.lines["b_pu"] = network.lines.b*network.lines.v_nom**2 network.lines["g_pu"] = network.lines.g*network.lines.v_nom**2 network.lines["x_pu_eff"] = network.lines["x_pu"] network.lines["r_pu_eff"] = network.lines["r_pu"] #convert transformer impedances from base power s_nom to base = 1 MVA network.transformers["x_pu"] = network.transformers.x/network.transformers.s_nom network.transformers["r_pu"] = network.transformers.r/network.transformers.s_nom network.transformers["b_pu"] = network.transformers.b*network.transformers.s_nom network.transformers["g_pu"] = network.transformers.g*network.transformers.s_nom network.transformers["x_pu_eff"] = network.transformers["x_pu"]* network.transformers["tap_ratio"] network.transformers["r_pu_eff"] = network.transformers["r_pu"]* network.transformers["tap_ratio"] apply_transformer_t_model(network) network.shunt_impedances["v_nom"] = network.shunt_impedances["bus"].map(network.buses.v_nom) network.shunt_impedances["b_pu"] = network.shunt_impedances.b*network.shunt_impedances.v_nom**2 network.shunt_impedances["g_pu"] = network.shunt_impedances.g*network.shunt_impedances.v_nom**2
python
def calculate_dependent_values(network): apply_line_types(network) apply_transformer_types(network) network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom) network.lines["x_pu"] = network.lines.x/(network.lines.v_nom**2) network.lines["r_pu"] = network.lines.r/(network.lines.v_nom**2) network.lines["b_pu"] = network.lines.b*network.lines.v_nom**2 network.lines["g_pu"] = network.lines.g*network.lines.v_nom**2 network.lines["x_pu_eff"] = network.lines["x_pu"] network.lines["r_pu_eff"] = network.lines["r_pu"] #convert transformer impedances from base power s_nom to base = 1 MVA network.transformers["x_pu"] = network.transformers.x/network.transformers.s_nom network.transformers["r_pu"] = network.transformers.r/network.transformers.s_nom network.transformers["b_pu"] = network.transformers.b*network.transformers.s_nom network.transformers["g_pu"] = network.transformers.g*network.transformers.s_nom network.transformers["x_pu_eff"] = network.transformers["x_pu"]* network.transformers["tap_ratio"] network.transformers["r_pu_eff"] = network.transformers["r_pu"]* network.transformers["tap_ratio"] apply_transformer_t_model(network) network.shunt_impedances["v_nom"] = network.shunt_impedances["bus"].map(network.buses.v_nom) network.shunt_impedances["b_pu"] = network.shunt_impedances.b*network.shunt_impedances.v_nom**2 network.shunt_impedances["g_pu"] = network.shunt_impedances.g*network.shunt_impedances.v_nom**2
[ "def", "calculate_dependent_values", "(", "network", ")", ":", "apply_line_types", "(", "network", ")", "apply_transformer_types", "(", "network", ")", "network", ".", "lines", "[", "\"v_nom\"", "]", "=", "network", ".", "lines", ".", "bus0", ".", "map", "(", ...
Calculate per unit impedances and append voltages to lines and shunt impedances.
[ "Calculate", "per", "unit", "impedances", "and", "append", "voltages", "to", "lines", "and", "shunt", "impedances", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L514-L542
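The per-unit conversions above all assume a system base power of 1 MVA (with v_nom in kV, kV**2/MVA comes out in Ohm). A quick numeric check with invented values:

# line: impedance base is v_nom**2 / s_base with s_base = 1 MVA
v_nom = 380.0            # kV
x_ohm = 14.44            # Ohm
x_pu = x_ohm / v_nom**2  # = 1e-4 per unit

# susceptance converts the other way round
b_siemens = 1e-4
b_pu = b_siemens * v_nom**2  # = 14.44 per unit

# transformer: parameters are given on its own base s_nom, so rebasing to
# 1 MVA divides impedances (and multiplies admittances) by s_nom
s_nom = 500.0             # MVA
x_pu_trafo = 0.1 / s_nom  # 2e-4 per unit on the 1 MVA base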
224,638
PyPSA/PyPSA
pypsa/pf.py
find_slack_bus
def find_slack_bus(sub_network): """Find the slack bus in a connected sub-network.""" gens = sub_network.generators() if len(gens) == 0: logger.warning("No generators in sub-network {}, better hope power is already balanced".format(sub_network.name)) sub_network.slack_generator = None sub_network.slack_bus = sub_network.buses_i()[0] else: slacks = gens[gens.control == "Slack"].index if len(slacks) == 0: sub_network.slack_generator = gens.index[0] sub_network.network.generators.loc[sub_network.slack_generator,"control"] = "Slack" logger.debug("No slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator)) elif len(slacks) == 1: sub_network.slack_generator = slacks[0] else: sub_network.slack_generator = slacks[0] sub_network.network.generators.loc[slacks[1:],"control"] = "PV" logger.debug("More than one slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator)) sub_network.slack_bus = gens.bus[sub_network.slack_generator] #also put it into the dataframe sub_network.network.sub_networks.at[sub_network.name,"slack_bus"] = sub_network.slack_bus logger.info("Slack bus for sub-network {} is {}".format(sub_network.name, sub_network.slack_bus))
python
def find_slack_bus(sub_network): gens = sub_network.generators() if len(gens) == 0: logger.warning("No generators in sub-network {}, better hope power is already balanced".format(sub_network.name)) sub_network.slack_generator = None sub_network.slack_bus = sub_network.buses_i()[0] else: slacks = gens[gens.control == "Slack"].index if len(slacks) == 0: sub_network.slack_generator = gens.index[0] sub_network.network.generators.loc[sub_network.slack_generator,"control"] = "Slack" logger.debug("No slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator)) elif len(slacks) == 1: sub_network.slack_generator = slacks[0] else: sub_network.slack_generator = slacks[0] sub_network.network.generators.loc[slacks[1:],"control"] = "PV" logger.debug("More than one slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator)) sub_network.slack_bus = gens.bus[sub_network.slack_generator] #also put it into the dataframe sub_network.network.sub_networks.at[sub_network.name,"slack_bus"] = sub_network.slack_bus logger.info("Slack bus for sub-network {} is {}".format(sub_network.name, sub_network.slack_bus))
[ "def", "find_slack_bus", "(", "sub_network", ")", ":", "gens", "=", "sub_network", ".", "generators", "(", ")", "if", "len", "(", "gens", ")", "==", "0", ":", "logger", ".", "warning", "(", "\"No generators in sub-network {}, better hope power is already balanced\""...
Find the slack bus in a connected sub-network.
[ "Find", "the", "slack", "bus", "in", "a", "connected", "sub", "-", "network", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L545-L576
224,639
PyPSA/PyPSA
pypsa/pf.py
find_bus_controls
def find_bus_controls(sub_network): """Find slack and all PV and PQ buses for a sub_network. This function also fixes sub_network.buses_o, a DataFrame ordered by control type.""" network = sub_network.network find_slack_bus(sub_network) gens = sub_network.generators() buses_i = sub_network.buses_i() #default bus control is PQ network.buses.loc[buses_i, "control"] = "PQ" #find all buses with one or more gens with PV pvs = gens[gens.control == 'PV'].index.to_series() if len(pvs) > 0: pvs = pvs.groupby(gens.bus).first() network.buses.loc[pvs.index, "control"] = "PV" network.buses.loc[pvs.index, "generator"] = pvs network.buses.loc[sub_network.slack_bus, "control"] = "Slack" network.buses.loc[sub_network.slack_bus, "generator"] = sub_network.slack_generator buses_control = network.buses.loc[buses_i, "control"] sub_network.pvs = buses_control.index[buses_control == "PV"] sub_network.pqs = buses_control.index[buses_control == "PQ"] sub_network.pvpqs = sub_network.pvs.append(sub_network.pqs) # order buses sub_network.buses_o = sub_network.pvpqs.insert(0, sub_network.slack_bus)
python
def find_bus_controls(sub_network): network = sub_network.network find_slack_bus(sub_network) gens = sub_network.generators() buses_i = sub_network.buses_i() #default bus control is PQ network.buses.loc[buses_i, "control"] = "PQ" #find all buses with one or more gens with PV pvs = gens[gens.control == 'PV'].index.to_series() if len(pvs) > 0: pvs = pvs.groupby(gens.bus).first() network.buses.loc[pvs.index, "control"] = "PV" network.buses.loc[pvs.index, "generator"] = pvs network.buses.loc[sub_network.slack_bus, "control"] = "Slack" network.buses.loc[sub_network.slack_bus, "generator"] = sub_network.slack_generator buses_control = network.buses.loc[buses_i, "control"] sub_network.pvs = buses_control.index[buses_control == "PV"] sub_network.pqs = buses_control.index[buses_control == "PQ"] sub_network.pvpqs = sub_network.pvs.append(sub_network.pqs) # order buses sub_network.buses_o = sub_network.pvpqs.insert(0, sub_network.slack_bus)
[ "def", "find_bus_controls", "(", "sub_network", ")", ":", "network", "=", "sub_network", ".", "network", "find_slack_bus", "(", "sub_network", ")", "gens", "=", "sub_network", ".", "generators", "(", ")", "buses_i", "=", "sub_network", ".", "buses_i", "(", ")"...
Find slack and all PV and PQ buses for a sub_network. This function also fixes sub_network.buses_o, a DataFrame ordered by control type.
[ "Find", "slack", "and", "all", "PV", "and", "PQ", "buses", "for", "a", "sub_network", ".", "This", "function", "also", "fixes", "sub_network", ".", "buses_o", "a", "DataFrame", "ordered", "by", "control", "type", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L579-L611
224,640
PyPSA/PyPSA
pypsa/pf.py
calculate_B_H
def calculate_B_H(sub_network,skip_pre=False): """Calculate B and H matrices for AC or DC sub-networks.""" network = sub_network.network if not skip_pre: calculate_dependent_values(network) find_bus_controls(sub_network) if network.sub_networks.at[sub_network.name,"carrier"] == "DC": attribute="r_pu_eff" else: attribute="x_pu_eff" #following leans heavily on pypower.makeBdc #susceptances b = 1./np.concatenate([(c.df.loc[c.ind, attribute]).values \ for c in sub_network.iterate_components(network.passive_branch_components)]) if np.isnan(b).any(): logger.warning("Warning! Some series impedances are zero - this will cause a singularity in LPF!") b_diag = csr_matrix((b, (r_[:len(b)], r_[:len(b)]))) #incidence matrix sub_network.K = sub_network.incidence_matrix(busorder=sub_network.buses_o) sub_network.H = b_diag*sub_network.K.T #weighted Laplacian sub_network.B = sub_network.K * sub_network.H sub_network.p_branch_shift = -b*np.concatenate([(c.df.loc[c.ind, "phase_shift"]).values*np.pi/180. if c.name == "Transformer" else np.zeros((len(c.ind),)) for c in sub_network.iterate_components(network.passive_branch_components)]) sub_network.p_bus_shift = sub_network.K * sub_network.p_branch_shift
python
def calculate_B_H(sub_network,skip_pre=False): network = sub_network.network if not skip_pre: calculate_dependent_values(network) find_bus_controls(sub_network) if network.sub_networks.at[sub_network.name,"carrier"] == "DC": attribute="r_pu_eff" else: attribute="x_pu_eff" #following leans heavily on pypower.makeBdc #susceptances b = 1./np.concatenate([(c.df.loc[c.ind, attribute]).values \ for c in sub_network.iterate_components(network.passive_branch_components)]) if np.isnan(b).any(): logger.warning("Warning! Some series impedances are zero - this will cause a singularity in LPF!") b_diag = csr_matrix((b, (r_[:len(b)], r_[:len(b)]))) #incidence matrix sub_network.K = sub_network.incidence_matrix(busorder=sub_network.buses_o) sub_network.H = b_diag*sub_network.K.T #weighted Laplacian sub_network.B = sub_network.K * sub_network.H sub_network.p_branch_shift = -b*np.concatenate([(c.df.loc[c.ind, "phase_shift"]).values*np.pi/180. if c.name == "Transformer" else np.zeros((len(c.ind),)) for c in sub_network.iterate_components(network.passive_branch_components)]) sub_network.p_bus_shift = sub_network.K * sub_network.p_branch_shift
[ "def", "calculate_B_H", "(", "sub_network", ",", "skip_pre", "=", "False", ")", ":", "network", "=", "sub_network", ".", "network", "if", "not", "skip_pre", ":", "calculate_dependent_values", "(", "network", ")", "find_bus_controls", "(", "sub_network", ")", "if...
Calculate B and H matrices for AC or DC sub-networks.
[ "Calculate", "B", "and", "H", "matrices", "for", "AC", "or", "DC", "sub", "-", "networks", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L614-L652
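A standalone sketch of the weighted-Laplacian construction B = K diag(b) K.T for a three-bus ring, mirroring the K, H and B objects above with invented reactances:

import numpy as np
from scipy.sparse import csr_matrix

# incidence matrix K (buses x branches): +1 at bus0, -1 at bus1
K = csr_matrix(np.array([[ 1.,  0., -1.],
                         [-1.,  1.,  0.],
                         [ 0., -1.,  1.]]))

x_pu_eff = np.array([0.1, 0.2, 0.1])  # invented reactances
b = 1. / x_pu_eff                     # branch susceptances

b_diag = csr_matrix((b, (np.arange(3), np.arange(3))))
H = b_diag * K.T   # maps bus angles to branch flows
B = K * H          # weighted Laplacian: nodal susceptance matrix

# rows of a Laplacian sum to zero, since each column of K does
assert np.allclose(B.sum(axis=1), 0)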
224,641
PyPSA/PyPSA
pypsa/pf.py
find_tree
def find_tree(sub_network, weight='x_pu'): """Get the spanning tree of the graph, choose the node with the highest degree as a central "tree slack" and then see for each branch which paths from the slack to each node go through the branch. """ branches_bus0 = sub_network.branches()["bus0"] branches_i = branches_bus0.index buses_i = sub_network.buses_i() graph = sub_network.graph(weight=weight, inf_weight=1.) sub_network.tree = nx.minimum_spanning_tree(graph) #find bus with highest degree to use as slack tree_slack_bus, slack_degree = max(degree(sub_network.tree), key=itemgetter(1)) logger.info("Tree slack bus is %s with degree %d.", tree_slack_bus, slack_degree) #determine which buses are supplied in tree through branch from slack #matrix to store tree structure sub_network.T = dok_matrix((len(branches_i),len(buses_i))) for j,bus in enumerate(buses_i): path = nx.shortest_path(sub_network.tree,bus,tree_slack_bus) for i in range(len(path)-1): branch = next(iterkeys(graph[path[i]][path[i+1]])) branch_i = branches_i.get_loc(branch) sign = +1 if branches_bus0.iat[branch_i] == path[i] else -1 sub_network.T[branch_i,j] = sign
python
def find_tree(sub_network, weight='x_pu'): branches_bus0 = sub_network.branches()["bus0"] branches_i = branches_bus0.index buses_i = sub_network.buses_i() graph = sub_network.graph(weight=weight, inf_weight=1.) sub_network.tree = nx.minimum_spanning_tree(graph) #find bus with highest degree to use as slack tree_slack_bus, slack_degree = max(degree(sub_network.tree), key=itemgetter(1)) logger.info("Tree slack bus is %s with degree %d.", tree_slack_bus, slack_degree) #determine which buses are supplied in tree through branch from slack #matrix to store tree structure sub_network.T = dok_matrix((len(branches_i),len(buses_i))) for j,bus in enumerate(buses_i): path = nx.shortest_path(sub_network.tree,bus,tree_slack_bus) for i in range(len(path)-1): branch = next(iterkeys(graph[path[i]][path[i+1]])) branch_i = branches_i.get_loc(branch) sign = +1 if branches_bus0.iat[branch_i] == path[i] else -1 sub_network.T[branch_i,j] = sign
[ "def", "find_tree", "(", "sub_network", ",", "weight", "=", "'x_pu'", ")", ":", "branches_bus0", "=", "sub_network", ".", "branches", "(", ")", "[", "\"bus0\"", "]", "branches_i", "=", "branches_bus0", ".", "index", "buses_i", "=", "sub_network", ".", "buses...
Get the spanning tree of the graph, choose the node with the highest degree as a central "tree slack" and then see for each branch which paths from the slack to each node go through the branch.
[ "Get", "the", "spanning", "tree", "of", "the", "graph", "choose", "the", "node", "with", "the", "highest", "degree", "as", "a", "central", "tree", "slack", "and", "then", "see", "for", "each", "branch", "which", "paths", "from", "the", "slack", "to", "ea...
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L818-L848
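A toy illustration of the tree construction, assuming networkx 2.x; bus names and weights are invented:

import networkx as nx

graph = nx.Graph()
graph.add_weighted_edges_from([("bus0", "bus1", 0.1),
                               ("bus1", "bus2", 0.2),
                               ("bus2", "bus0", 0.4)])

tree = nx.minimum_spanning_tree(graph)  # drops the heaviest edge (0.4)
slack, deg = max(tree.degree(), key=lambda nd: nd[1])
print(slack, deg)  # bus1 2 -- the hub of the remaining path

# the tree path from each bus to the slack decides the +/-1 entries of T
print(nx.shortest_path(tree, "bus2", slack))  # ['bus2', 'bus1']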
224,642
PyPSA/PyPSA
pypsa/pf.py
find_cycles
def find_cycles(sub_network, weight='x_pu'): """ Find all cycles in the sub_network and record them in sub_network.C. networkx collects the cycles with more than 2 edges; then the 2-edge cycles from the MultiGraph must be collected separately (for cases where there are multiple lines between the same pairs of buses). Cycles with infinite impedance are skipped. """ branches_bus0 = sub_network.branches()["bus0"] branches_i = branches_bus0.index #reduce to a non-multi-graph for cycles with > 2 edges mgraph = sub_network.graph(weight=weight, inf_weight=False) graph = nx.OrderedGraph(mgraph) cycles = nx.cycle_basis(graph) #number of 2-edge cycles num_multi = len(mgraph.edges()) - len(graph.edges()) sub_network.C = dok_matrix((len(branches_bus0),len(cycles)+num_multi)) for j,cycle in enumerate(cycles): for i in range(len(cycle)): branch = next(iterkeys(mgraph[cycle[i]][cycle[(i+1)%len(cycle)]])) branch_i = branches_i.get_loc(branch) sign = +1 if branches_bus0.iat[branch_i] == cycle[i] else -1 sub_network.C[branch_i,j] += sign #counter for multis c = len(cycles) #add multi-graph 2-edge cycles for multiple branches between same pairs of buses for u,v in graph.edges(): bs = list(mgraph[u][v].keys()) if len(bs) > 1: first = bs[0] first_i = branches_i.get_loc(first) for b in bs[1:]: b_i = branches_i.get_loc(b) sign = -1 if branches_bus0.iat[b_i] == branches_bus0.iat[first_i] else +1 sub_network.C[first_i,c] = 1 sub_network.C[b_i,c] = sign c+=1
python
def find_cycles(sub_network, weight='x_pu'): branches_bus0 = sub_network.branches()["bus0"] branches_i = branches_bus0.index #reduce to a non-multi-graph for cycles with > 2 edges mgraph = sub_network.graph(weight=weight, inf_weight=False) graph = nx.OrderedGraph(mgraph) cycles = nx.cycle_basis(graph) #number of 2-edge cycles num_multi = len(mgraph.edges()) - len(graph.edges()) sub_network.C = dok_matrix((len(branches_bus0),len(cycles)+num_multi)) for j,cycle in enumerate(cycles): for i in range(len(cycle)): branch = next(iterkeys(mgraph[cycle[i]][cycle[(i+1)%len(cycle)]])) branch_i = branches_i.get_loc(branch) sign = +1 if branches_bus0.iat[branch_i] == cycle[i] else -1 sub_network.C[branch_i,j] += sign #counter for multis c = len(cycles) #add multi-graph 2-edge cycles for multiple branches between same pairs of buses for u,v in graph.edges(): bs = list(mgraph[u][v].keys()) if len(bs) > 1: first = bs[0] first_i = branches_i.get_loc(first) for b in bs[1:]: b_i = branches_i.get_loc(b) sign = -1 if branches_bus0.iat[b_i] == branches_bus0.iat[first_i] else +1 sub_network.C[first_i,c] = 1 sub_network.C[b_i,c] = sign c+=1
[ "def", "find_cycles", "(", "sub_network", ",", "weight", "=", "'x_pu'", ")", ":", "branches_bus0", "=", "sub_network", ".", "branches", "(", ")", "[", "\"bus0\"", "]", "branches_i", "=", "branches_bus0", ".", "index", "#reduce to a non-multi-graph for cycles with > ...
Find all cycles in the sub_network and record them in sub_network.C. networkx collects the cycles with more than 2 edges; then the 2-edge cycles from the MultiGraph must be collected separately (for cases where there are multiple lines between the same pairs of buses). Cycles with infinite impedance are skipped.
[ "Find", "all", "cycles", "in", "the", "sub_network", "and", "record", "them", "in", "sub_network", ".", "C", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L851-L897
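A toy illustration of the two cycle sources handled above: networkx's cycle_basis on the collapsed graph, plus a separately counted 2-edge cycle from a doubled edge (the graph is invented, and this sketch uses nx.Graph where the record uses nx.OrderedGraph):

import networkx as nx

mgraph = nx.MultiGraph()
mgraph.add_edges_from([("bus0", "bus1"), ("bus1", "bus2"),
                       ("bus2", "bus0"), ("bus0", "bus1")])  # doubled edge

graph = nx.Graph(mgraph)        # collapse parallel edges
cycles = nx.cycle_basis(graph)  # one triangle

# the doubled bus0-bus1 edge is invisible to cycle_basis, so 2-edge
# cycles are counted separately, exactly as in find_cycles
num_multi = len(mgraph.edges()) - len(graph.edges())  # == 1
print(len(cycles) + num_multi)  # 2 independent cycles (= edges - buses + 1)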
224,643
PyPSA/PyPSA
pypsa/contingency.py
network_lpf_contingency
def network_lpf_contingency(network, snapshots=None, branch_outages=None): """ Computes linear power flow for a selection of branch outages. Parameters ---------- snapshots : list-like|single snapshot A subset or an elements of network.snapshots on which to run the power flow, defaults to network.snapshots NB: currently this only works for a single snapshot branch_outages : list-like A list of passive branches which are to be tested for outages. If None, it's take as all network.passive_branches_i() Returns ------- p0 : pandas.DataFrame num_passive_branch x num_branch_outages DataFrame of new power flows """ if snapshots is None: snapshots = network.snapshots if isinstance(snapshots, collections.Iterable): logger.warning("Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.") snapshot = snapshots[0] else: snapshot = snapshots network.lpf(snapshot) # Store the flows from the base case passive_branches = network.passive_branches() if branch_outages is None: branch_outages = passive_branches.index p0_base = pd.Series(index=passive_branches.index) for c in network.passive_branch_components: pnl = network.pnl(c) p0_base[c] = pnl.p0.loc[snapshot] for sn in network.sub_networks.obj: sn._branches = sn.branches() sn.calculate_BODF() p0 = pd.DataFrame(index=passive_branches.index) p0["base"] = p0_base for branch in branch_outages: if type(branch) is not tuple: logger.warning("No type given for {}, assuming it is a line".format(branch)) branch = ("Line",branch) sn = network.sub_networks.obj[passive_branches.sub_network[branch]] branch_i = sn._branches.index.get_loc(branch) p0_new = p0_base + pd.Series(sn.BODF[:,branch_i]*p0_base[branch],sn._branches.index) p0[branch] = p0_new return p0
python
def network_lpf_contingency(network, snapshots=None, branch_outages=None): if snapshots is None: snapshots = network.snapshots if isinstance(snapshots, collections.Iterable): logger.warning("Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.") snapshot = snapshots[0] else: snapshot = snapshots network.lpf(snapshot) # Store the flows from the base case passive_branches = network.passive_branches() if branch_outages is None: branch_outages = passive_branches.index p0_base = pd.Series(index=passive_branches.index) for c in network.passive_branch_components: pnl = network.pnl(c) p0_base[c] = pnl.p0.loc[snapshot] for sn in network.sub_networks.obj: sn._branches = sn.branches() sn.calculate_BODF() p0 = pd.DataFrame(index=passive_branches.index) p0["base"] = p0_base for branch in branch_outages: if type(branch) is not tuple: logger.warning("No type given for {}, assuming it is a line".format(branch)) branch = ("Line",branch) sn = network.sub_networks.obj[passive_branches.sub_network[branch]] branch_i = sn._branches.index.get_loc(branch) p0_new = p0_base + pd.Series(sn.BODF[:,branch_i]*p0_base[branch],sn._branches.index) p0[branch] = p0_new return p0
[ "def", "network_lpf_contingency", "(", "network", ",", "snapshots", "=", "None", ",", "branch_outages", "=", "None", ")", ":", "if", "snapshots", "is", "None", ":", "snapshots", "=", "network", ".", "snapshots", "if", "isinstance", "(", "snapshots", ",", "co...
Computes linear power flow for a selection of branch outages. Parameters ---------- snapshots : list-like|single snapshot A subset or an element of network.snapshots on which to run the power flow, defaults to network.snapshots NB: currently this only works for a single snapshot branch_outages : list-like A list of passive branches which are to be tested for outages. If None, it's taken as all network.passive_branches_i() Returns ------- p0 : pandas.DataFrame num_passive_branch x num_branch_outages DataFrame of new power flows
[ "Computes", "linear", "power", "flow", "for", "a", "selection", "of", "branch", "outages", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/contingency.py#L85-L152
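A hedged usage sketch; the bound method form network.lpf_contingency is assumed here (the module-level function can also be called directly), the network is assumed to be meshed, and the line names are hypothetical:

# one LPF for the base case, then the BODF gives every post-outage flow
# without re-solving
p0 = network.lpf_contingency(network.snapshots[0],
                             branch_outages=[("Line", "line01"),
                                             ("Line", "line12")])

print(p0["base"])              # pre-outage flows on all passive branches
print(p0[("Line", "line01")])  # flows after losing line01

worst = p0.abs().max(axis=1)   # worst-case loading per branch over all cases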
224,644
PyPSA/PyPSA
pypsa/plot.py
compute_bbox_with_margins
def compute_bbox_with_margins(margin, x, y): 'Helper function to compute bounding box for the plot' # set margins pos = np.asarray((x, y)) minxy, maxxy = pos.min(axis=1), pos.max(axis=1) xy1 = minxy - margin*(maxxy - minxy) xy2 = maxxy + margin*(maxxy - minxy) return tuple(xy1), tuple(xy2)
python
def compute_bbox_with_margins(margin, x, y): 'Helper function to compute bounding box for the plot' # set margins pos = np.asarray((x, y)) minxy, maxxy = pos.min(axis=1), pos.max(axis=1) xy1 = minxy - margin*(maxxy - minxy) xy2 = maxxy + margin*(maxxy - minxy) return tuple(xy1), tuple(xy2)
[ "def", "compute_bbox_with_margins", "(", "margin", ",", "x", ",", "y", ")", ":", "# set margins", "pos", "=", "np", ".", "asarray", "(", "(", "x", ",", "y", ")", ")", "minxy", ",", "maxxy", "=", "pos", ".", "min", "(", "axis", "=", "1", ")", ",",...
Helper function to compute bounding box for the plot
[ "Helper", "function", "to", "compute", "bounding", "box", "for", "the", "plot" ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/plot.py#L298-L305
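The margin arithmetic in compute_bbox_with_margins is easy to sanity-check by hand: with a 10% margin, each side of the box is padded by 10% of the coordinate span. A small sketch, assuming the helper can be imported from pypsa.plot:

import numpy as np
from pypsa.plot import compute_bbox_with_margins

x = np.array([0., 10.])  # bus x coordinates
y = np.array([0., 5.])   # bus y coordinates
(x1, y1), (x2, y2) = compute_bbox_with_margins(0.1, x, y)
# span is (10, 5), so the box is padded by (1, 0.5) on each side
assert (x1, y1) == (-1., -0.5) and (x2, y2) == (11., 5.5)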
224,645
PyPSA/PyPSA
pypsa/plot.py
projected_area_factor
def projected_area_factor(ax, original_crs): """ Helper function to get the area scale of the current projection in reference to the default projection. """ if not hasattr(ax, 'projection'): return 1 if isinstance(ax.projection, ccrs.PlateCarree): return 1 x1, x2, y1, y2 = ax.get_extent() pbounds = \ get_projection_from_crs(original_crs).transform_points(ax.projection, np.array([x1, x2]), np.array([y1, y2])) return np.sqrt(abs((x2 - x1) * (y2 - y1)) /abs((pbounds[0] - pbounds[1])[:2].prod()))
python
def projected_area_factor(ax, original_crs): if not hasattr(ax, 'projection'): return 1 if isinstance(ax.projection, ccrs.PlateCarree): return 1 x1, x2, y1, y2 = ax.get_extent() pbounds = \ get_projection_from_crs(original_crs).transform_points(ax.projection, np.array([x1, x2]), np.array([y1, y2])) return np.sqrt(abs((x2 - x1) * (y2 - y1)) /abs((pbounds[0] - pbounds[1])[:2].prod()))
[ "def", "projected_area_factor", "(", "ax", ",", "original_crs", ")", ":", "if", "not", "hasattr", "(", "ax", ",", "'projection'", ")", ":", "return", "1", "if", "isinstance", "(", "ax", ".", "projection", ",", "ccrs", ".", "PlateCarree", ")", ":", "retur...
Helper function to get the area scale of the current projection in reference to the default projection.
[ "Helper", "function", "to", "get", "the", "area", "scale", "of", "the", "current", "projection", "in", "reference", "to", "the", "default", "projection", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/plot.py#L308-L323
224,646
PyPSA/PyPSA
pypsa/io.py
_export_to_exporter
def _export_to_exporter(network, exporter, basename, export_standard_types=False): """ Export to exporter. Both static and series attributes of components are exported, but only if they have non-default values. Parameters ---------- exporter : Exporter Initialized exporter instance basename : str Basename, used for logging export_standard_types : boolean, default False If True, then standard types are exported too (upon reimporting you should then set "ignore_standard_types" when initialising the network). """ #exportable component types #what about None???? - nan is float? allowed_types = (float,int,bool) + string_types + tuple(np.typeDict.values()) #first export network properties attrs = dict((attr, getattr(network, attr)) for attr in dir(network) if (not attr.startswith("__") and isinstance(getattr(network,attr), allowed_types))) exporter.save_attributes(attrs) #now export snapshots snapshots = pd.DataFrame(dict(weightings=network.snapshot_weightings), index=pd.Index(network.snapshots, name="name")) exporter.save_snapshots(snapshots) exported_components = [] for component in network.all_components - {"SubNetwork"}: list_name = network.components[component]["list_name"] attrs = network.components[component]["attrs"] df = network.df(component) pnl = network.pnl(component) if not export_standard_types and component in network.standard_type_components: df = df.drop(network.components[component]["standard_types"].index) # first do static attributes df.index.name = "name" if df.empty: exporter.remove_static(list_name) continue col_export = [] for col in df.columns: # do not export derived attributes if col in ["sub_network", "r_pu", "x_pu", "g_pu", "b_pu"]: continue if col in attrs.index and pd.isnull(attrs.at[col, "default"]) and pd.isnull(df[col]).all(): continue if (col in attrs.index and df[col].dtype == attrs.at[col, 'dtype'] and (df[col] == attrs.at[col, "default"]).all()): continue col_export.append(col) exporter.save_static(list_name, df[col_export]) #now do varying attributes for attr in pnl: if attr not in attrs.index: col_export = pnl[attr].columns else: default = attrs.at[attr, "default"] if pd.isnull(default): col_export = pnl[attr].columns[(~pd.isnull(pnl[attr])).any()] else: col_export = pnl[attr].columns[(pnl[attr] != default).any()] if len(col_export) > 0: df = pnl[attr][col_export] exporter.save_series(list_name, attr, df) else: exporter.remove_series(list_name, attr) exported_components.append(list_name) logger.info("Exported network {} has {}".format(basename, ", ".join(exported_components)))
python
def _export_to_exporter(network, exporter, basename, export_standard_types=False): #exportable component types #what about None???? - nan is float? allowed_types = (float,int,bool) + string_types + tuple(np.typeDict.values()) #first export network properties attrs = dict((attr, getattr(network, attr)) for attr in dir(network) if (not attr.startswith("__") and isinstance(getattr(network,attr), allowed_types))) exporter.save_attributes(attrs) #now export snapshots snapshots = pd.DataFrame(dict(weightings=network.snapshot_weightings), index=pd.Index(network.snapshots, name="name")) exporter.save_snapshots(snapshots) exported_components = [] for component in network.all_components - {"SubNetwork"}: list_name = network.components[component]["list_name"] attrs = network.components[component]["attrs"] df = network.df(component) pnl = network.pnl(component) if not export_standard_types and component in network.standard_type_components: df = df.drop(network.components[component]["standard_types"].index) # first do static attributes df.index.name = "name" if df.empty: exporter.remove_static(list_name) continue col_export = [] for col in df.columns: # do not export derived attributes if col in ["sub_network", "r_pu", "x_pu", "g_pu", "b_pu"]: continue if col in attrs.index and pd.isnull(attrs.at[col, "default"]) and pd.isnull(df[col]).all(): continue if (col in attrs.index and df[col].dtype == attrs.at[col, 'dtype'] and (df[col] == attrs.at[col, "default"]).all()): continue col_export.append(col) exporter.save_static(list_name, df[col_export]) #now do varying attributes for attr in pnl: if attr not in attrs.index: col_export = pnl[attr].columns else: default = attrs.at[attr, "default"] if pd.isnull(default): col_export = pnl[attr].columns[(~pd.isnull(pnl[attr])).any()] else: col_export = pnl[attr].columns[(pnl[attr] != default).any()] if len(col_export) > 0: df = pnl[attr][col_export] exporter.save_series(list_name, attr, df) else: exporter.remove_series(list_name, attr) exported_components.append(list_name) logger.info("Exported network {} has {}".format(basename, ", ".join(exported_components)))
[ "def", "_export_to_exporter", "(", "network", ",", "exporter", ",", "basename", ",", "export_standard_types", "=", "False", ")", ":", "#exportable component types", "#what about None???? - nan is float?", "allowed_types", "=", "(", "float", ",", "int", ",", "bool", ")...
Export to exporter. Both static and series attributes of components are exported, but only if they have non-default values. Parameters ---------- exporter : Exporter Initialized exporter instance basename : str Basename, used for logging export_standard_types : boolean, default False If True, then standard types are exported too (upon reimporting you should then set "ignore_standard_types" when initialising the network).
[ "Export", "to", "exporter", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L282-L370
224,647
PyPSA/PyPSA
pypsa/io.py
import_from_csv_folder
def import_from_csv_folder(network, csv_folder_name, encoding=None, skip_time=False): """ Import network data from CSVs in a folder. The CSVs must follow the standard form, see pypsa/examples. Parameters ---------- csv_folder_name : string Name of folder encoding : str, default None Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ skip_time : bool, default False Skip reading in time dependent attributes """ basename = os.path.basename(csv_folder_name) with ImporterCSV(csv_folder_name, encoding=encoding) as importer: _import_from_importer(network, importer, basename=basename, skip_time=skip_time)
python
def import_from_csv_folder(network, csv_folder_name, encoding=None, skip_time=False): basename = os.path.basename(csv_folder_name) with ImporterCSV(csv_folder_name, encoding=encoding) as importer: _import_from_importer(network, importer, basename=basename, skip_time=skip_time)
[ "def", "import_from_csv_folder", "(", "network", ",", "csv_folder_name", ",", "encoding", "=", "None", ",", "skip_time", "=", "False", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "csv_folder_name", ")", "with", "ImporterCSV", "(", "cs...
Import network data from CSVs in a folder. The CSVs must follow the standard form, see pypsa/examples. Parameters ---------- csv_folder_name : string Name of folder encoding : str, default None Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ skip_time : bool, default False Skip reading in time dependent attributes
[ "Import", "network", "data", "from", "CSVs", "in", "a", "folder", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L372-L392
224,648
PyPSA/PyPSA
pypsa/io.py
export_to_csv_folder
def export_to_csv_folder(network, csv_folder_name, encoding=None, export_standard_types=False): """ Export network and components to a folder of CSVs. Both static and series attributes of components are exported, but only if they have non-default values. If csv_folder_name does not already exist, it is created. Parameters ---------- csv_folder_name : string Name of folder to which to export. encoding : str, default None Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ export_standard_types : boolean, default False If True, then standard types are exported too (upon reimporting you should then set "ignore_standard_types" when initialising the network). Examples -------- >>> export_to_csv_folder(network, csv_folder_name) OR >>> network.export_to_csv_folder(csv_folder_name) """ basename = os.path.basename(csv_folder_name) with ExporterCSV(csv_folder_name=csv_folder_name, encoding=encoding) as exporter: _export_to_exporter(network, exporter, basename=basename, export_standard_types=export_standard_types)
python
def export_to_csv_folder(network, csv_folder_name, encoding=None, export_standard_types=False): basename = os.path.basename(csv_folder_name) with ExporterCSV(csv_folder_name=csv_folder_name, encoding=encoding) as exporter: _export_to_exporter(network, exporter, basename=basename, export_standard_types=export_standard_types)
[ "def", "export_to_csv_folder", "(", "network", ",", "csv_folder_name", ",", "encoding", "=", "None", ",", "export_standard_types", "=", "False", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "csv_folder_name", ")", "with", "ExporterCSV", ...
Export network and components to a folder of CSVs. Both static and series attributes of components are exported, but only if they have non-default values. If csv_folder_name does not already exist, it is created. Parameters ---------- csv_folder_name : string Name of folder to which to export. encoding : str, default None Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ export_standard_types : boolean, default False If True, then standard types are exported too (upon reimporting you should then set "ignore_standard_types" when initialising the network). Examples -------- >>> export_to_csv_folder(network, csv_folder_name) OR >>> network.export_to_csv_folder(csv_folder_name)
[ "Export", "network", "and", "components", "to", "a", "folder", "of", "CSVs", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L394-L425
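A round-trip sketch for the CSV importer/exporter pair above: only non-default values are written, so the folder stays compact, and reading it back reproduces the components. The folder name is an illustrative placeholder:

import pypsa

network = pypsa.Network()
network.add("Bus", "bus 1", v_nom=20.)
network.add("Load", "load 1", bus="bus 1", p_set=5.)
network.export_to_csv_folder("my-network-csv")   # creates the folder if needed

network2 = pypsa.Network()
network2.import_from_csv_folder("my-network-csv")
print(network2.loads)                            # "load 1" attached to "bus 1"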
224,649
PyPSA/PyPSA
pypsa/io.py
import_from_hdf5
def import_from_hdf5(network, path, skip_time=False): """ Import network data from HDF5 store at `path`. Parameters ---------- path : string Name of HDF5 store skip_time : bool, default False Skip reading in time dependent attributes """ basename = os.path.basename(path) with ImporterHDF5(path) as importer: _import_from_importer(network, importer, basename=basename, skip_time=skip_time)
python
def import_from_hdf5(network, path, skip_time=False): basename = os.path.basename(path) with ImporterHDF5(path) as importer: _import_from_importer(network, importer, basename=basename, skip_time=skip_time)
[ "def", "import_from_hdf5", "(", "network", ",", "path", ",", "skip_time", "=", "False", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "with", "ImporterHDF5", "(", "path", ")", "as", "importer", ":", "_import_from_importer...
Import network data from HDF5 store at `path`. Parameters ---------- path : string Name of HDF5 store skip_time : bool, default False Skip reading in time dependent attributes
[ "Import", "network", "data", "from", "HDF5", "store", "at", "path", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L427-L441
224,650
PyPSA/PyPSA
pypsa/io.py
export_to_hdf5
def export_to_hdf5(network, path, export_standard_types=False, **kwargs): """ Export network and components to an HDF store. Both static and series attributes of components are exported, but only if they have non-default values. If path does not already exist, it is created. Parameters ---------- path : string Name of hdf5 file to which to export (if it exists, it is overwritten) **kwargs Extra arguments for pd.HDFStore to specify f.i. compression (default: complevel=4) Examples -------- >>> export_to_hdf5(network, filename) OR >>> network.export_to_hdf5(filename) """ kwargs.setdefault('complevel', 4) basename = os.path.basename(path) with ExporterHDF5(path, **kwargs) as exporter: _export_to_exporter(network, exporter, basename=basename, export_standard_types=export_standard_types)
python
def export_to_hdf5(network, path, export_standard_types=False, **kwargs): kwargs.setdefault('complevel', 4) basename = os.path.basename(path) with ExporterHDF5(path, **kwargs) as exporter: _export_to_exporter(network, exporter, basename=basename, export_standard_types=export_standard_types)
[ "def", "export_to_hdf5", "(", "network", ",", "path", ",", "export_standard_types", "=", "False", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'complevel'", ",", "4", ")", "basename", "=", "os", ".", "path", ".", "basename", "(",...
Export network and components to an HDF store. Both static and series attributes of components are exported, but only if they have non-default values. If path does not already exist, it is created. Parameters ---------- path : string Name of hdf5 file to which to export (if it exists, it is overwritten) **kwargs Extra arguments for pd.HDFStore to specify f.i. compression (default: complevel=4) Examples -------- >>> export_to_hdf5(network, filename) OR >>> network.export_to_hdf5(filename)
[ "Export", "network", "and", "components", "to", "an", "HDF", "store", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L443-L472
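The **kwargs pass-through means any pd.HDFStore option can be supplied at the call site; complevel merely defaults to 4 as set above. A sketch with an illustrative file name:

import pypsa

network = pypsa.Network()
network.add("Bus", "bus 1", v_nom=380.)
# override the default compression level and pick an explicit compressor
network.export_to_hdf5("my-network.h5", complevel=9, complib="zlib")

network2 = pypsa.Network()
network2.import_from_hdf5("my-network.h5")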
224,651
PyPSA/PyPSA
pypsa/io.py
import_from_netcdf
def import_from_netcdf(network, path, skip_time=False): """ Import network data from netCDF file or xarray Dataset at `path`. Parameters ---------- path : string|xr.Dataset Path to netCDF dataset or instance of xarray Dataset skip_time : bool, default False Skip reading in time dependent attributes """ assert has_xarray, "xarray must be installed for netCDF support." basename = os.path.basename(path) if isinstance(path, string_types) else None with ImporterNetCDF(path=path) as importer: _import_from_importer(network, importer, basename=basename, skip_time=skip_time)
python
def import_from_netcdf(network, path, skip_time=False): assert has_xarray, "xarray must be installed for netCDF support." basename = os.path.basename(path) if isinstance(path, string_types) else None with ImporterNetCDF(path=path) as importer: _import_from_importer(network, importer, basename=basename, skip_time=skip_time)
[ "def", "import_from_netcdf", "(", "network", ",", "path", ",", "skip_time", "=", "False", ")", ":", "assert", "has_xarray", ",", "\"xarray must be installed for netCDF support.\"", "basename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "if", "isi...
Import network data from netCDF file or xarray Dataset at `path`. Parameters ---------- path : string|xr.Dataset Path to netCDF dataset or instance of xarray Dataset skip_time : bool, default False Skip reading in time dependent attributes
[ "Import", "network", "data", "from", "netCDF", "file", "or", "xarray", "Dataset", "at", "path", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L474-L491
224,652
PyPSA/PyPSA
pypsa/io.py
export_to_netcdf
def export_to_netcdf(network, path=None, export_standard_types=False, least_significant_digit=None): """Export network and components to a netCDF file. Both static and series attributes of components are exported, but only if they have non-default values. If path does not already exist, it is created. If no path is passed, no file is exported, but the xarray.Dataset is still returned. Be aware that this cannot export boolean attributes on the Network class, e.g. network.my_bool = False is not supported by netCDF. Parameters ---------- path : string|None Name of netCDF file to which to export (if it exists, it is overwritten); if None is passed, no file is exported. least_significant_digit This is passed to the netCDF exporter, but currently makes no difference to file size or float accuracy. We're working on improving this... Returns ------- ds : xarray.Dataset Examples -------- >>> export_to_netcdf(network, "my_file.nc") OR >>> network.export_to_netcdf("my_file.nc") """ assert has_xarray, "xarray must be installed for netCDF support." basename = os.path.basename(path) if path is not None else None with ExporterNetCDF(path, least_significant_digit) as exporter: _export_to_exporter(network, exporter, basename=basename, export_standard_types=export_standard_types) return exporter.ds
python
def export_to_netcdf(network, path=None, export_standard_types=False, least_significant_digit=None): assert has_xarray, "xarray must be installed for netCDF support." basename = os.path.basename(path) if path is not None else None with ExporterNetCDF(path, least_significant_digit) as exporter: _export_to_exporter(network, exporter, basename=basename, export_standard_types=export_standard_types) return exporter.ds
[ "def", "export_to_netcdf", "(", "network", ",", "path", "=", "None", ",", "export_standard_types", "=", "False", ",", "least_significant_digit", "=", "None", ")", ":", "assert", "has_xarray", ",", "\"xarray must be installed for netCDF support.\"", "basename", "=", "o...
Export network and components to a netCDF file. Both static and series attributes of components are exported, but only if they have non-default values. If path does not already exist, it is created. If no path is passed, no file is exported, but the xarray.Dataset is still returned. Be aware that this cannot export boolean attributes on the Network class, e.g. network.my_bool = False is not supported by netCDF. Parameters ---------- path : string|None Name of netCDF file to which to export (if it exists, it is overwritten); if None is passed, no file is exported. least_significant_digit This is passed to the netCDF exporter, but currently makes no difference to file size or float accuracy. We're working on improving this... Returns ------- ds : xarray.Dataset Examples -------- >>> export_to_netcdf(network, "my_file.nc") OR >>> network.export_to_netcdf("my_file.nc")
[ "Export", "network", "and", "components", "to", "a", "netCDF", "file", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L493-L535
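Because export_to_netcdf returns the xarray.Dataset even when no path is given, the serialised form can be inspected purely in memory. A short sketch (the file name is illustrative):

import pypsa

network = pypsa.Network()
network.add("Bus", "bus 1", v_nom=380.)

ds = network.export_to_netcdf()            # path=None: nothing written to disk
print(ds)                                  # the xarray.Dataset representation
network.export_to_netcdf("my-network.nc")  # this call also writes the file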
224,653
PyPSA/PyPSA
pypsa/io.py
_import_from_importer
def _import_from_importer(network, importer, basename, skip_time=False): """ Import network data from importer. Parameters ---------- skip_time : bool Skip importing time """ attrs = importer.get_attributes() current_pypsa_version = [int(s) for s in network.pypsa_version.split(".")] pypsa_version = None if attrs is not None: network.name = attrs.pop('name') try: pypsa_version = [int(s) for s in attrs.pop("pypsa_version").split(".")] except KeyError: pypsa_version = None for attr, val in iteritems(attrs): setattr(network, attr, val) ##https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types if pypsa_version is None or pypsa_version < current_pypsa_version: logger.warning(dedent(""" Importing PyPSA from older version of PyPSA than current version {}. Please read the release notes at https://pypsa.org/doc/release_notes.html carefully to prepare your network for import. """).format(network.pypsa_version)) importer.pypsa_version = pypsa_version importer.current_pypsa_version = current_pypsa_version # if there is snapshots.csv, read in snapshot data df = importer.get_snapshots() if df is not None: network.set_snapshots(df.index) if "weightings" in df.columns: network.snapshot_weightings = df["weightings"].reindex(network.snapshots) imported_components = [] # now read in other components; make sure buses and carriers come first for component in ["Bus", "Carrier"] + sorted(network.all_components - {"Bus", "Carrier", "SubNetwork"}): list_name = network.components[component]["list_name"] df = importer.get_static(list_name) if df is None: if component == "Bus": logger.error("Error, no buses found") return else: continue import_components_from_dataframe(network, df, component) if not skip_time: for attr, df in importer.get_series(list_name): import_series_from_dataframe(network, df, component, attr) logger.debug(getattr(network,list_name)) imported_components.append(list_name) logger.info("Imported network{} has {}".format(" " + basename, ", ".join(imported_components)))
python
def _import_from_importer(network, importer, basename, skip_time=False): attrs = importer.get_attributes() current_pypsa_version = [int(s) for s in network.pypsa_version.split(".")] pypsa_version = None if attrs is not None: network.name = attrs.pop('name') try: pypsa_version = [int(s) for s in attrs.pop("pypsa_version").split(".")] except KeyError: pypsa_version = None for attr, val in iteritems(attrs): setattr(network, attr, val) ##https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types if pypsa_version is None or pypsa_version < current_pypsa_version: logger.warning(dedent(""" Importing PyPSA from older version of PyPSA than current version {}. Please read the release notes at https://pypsa.org/doc/release_notes.html carefully to prepare your network for import. """).format(network.pypsa_version)) importer.pypsa_version = pypsa_version importer.current_pypsa_version = current_pypsa_version # if there is snapshots.csv, read in snapshot data df = importer.get_snapshots() if df is not None: network.set_snapshots(df.index) if "weightings" in df.columns: network.snapshot_weightings = df["weightings"].reindex(network.snapshots) imported_components = [] # now read in other components; make sure buses and carriers come first for component in ["Bus", "Carrier"] + sorted(network.all_components - {"Bus", "Carrier", "SubNetwork"}): list_name = network.components[component]["list_name"] df = importer.get_static(list_name) if df is None: if component == "Bus": logger.error("Error, no buses found") return else: continue import_components_from_dataframe(network, df, component) if not skip_time: for attr, df in importer.get_series(list_name): import_series_from_dataframe(network, df, component, attr) logger.debug(getattr(network,list_name)) imported_components.append(list_name) logger.info("Imported network{} has {}".format(" " + basename, ", ".join(imported_components)))
[ "def", "_import_from_importer", "(", "network", ",", "importer", ",", "basename", ",", "skip_time", "=", "False", ")", ":", "attrs", "=", "importer", ".", "get_attributes", "(", ")", "current_pypsa_version", "=", "[", "int", "(", "s", ")", "for", "s", "in"...
Import network data from importer. Parameters ---------- skip_time : bool Skip importing time
[ "Import", "network", "data", "from", "importer", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L537-L605
224,654
PyPSA/PyPSA
pypsa/io.py
import_series_from_dataframe
def import_series_from_dataframe(network, dataframe, cls_name, attr): """ Import time series from a pandas DataFrame. Parameters ---------- dataframe : pandas.DataFrame cls_name : string Name of class of component attr : string Name of series attribute Examples -------- >>> import_series_from_dataframe(dataframe,"Load","p_set") """ df = network.df(cls_name) pnl = network.pnl(cls_name) list_name = network.components[cls_name]["list_name"] diff = dataframe.columns.difference(df.index) if len(diff) > 0: logger.warning("Components {} for attribute {} of {} are not in main components dataframe {}".format(diff,attr,cls_name,list_name)) attr_series = network.components[cls_name]["attrs"].loc[attr] columns = dataframe.columns diff = network.snapshots.difference(dataframe.index) if len(diff): logger.warning("Snapshots {} are missing from {} of {}. Filling with default value '{}'".format(diff,attr,cls_name,attr_series["default"])) dataframe = dataframe.reindex(network.snapshots, fill_value=attr_series["default"]) if not attr_series.static: pnl[attr] = pnl[attr].reindex(columns=df.index|columns, fill_value=attr_series.default) else: pnl[attr] = pnl[attr].reindex(columns=(pnl[attr].columns | columns)) pnl[attr].loc[network.snapshots, columns] = dataframe.loc[network.snapshots, columns]
python
def import_series_from_dataframe(network, dataframe, cls_name, attr): df = network.df(cls_name) pnl = network.pnl(cls_name) list_name = network.components[cls_name]["list_name"] diff = dataframe.columns.difference(df.index) if len(diff) > 0: logger.warning("Components {} for attribute {} of {} are not in main components dataframe {}".format(diff,attr,cls_name,list_name)) attr_series = network.components[cls_name]["attrs"].loc[attr] columns = dataframe.columns diff = network.snapshots.difference(dataframe.index) if len(diff): logger.warning("Snapshots {} are missing from {} of {}. Filling with default value '{}'".format(diff,attr,cls_name,attr_series["default"])) dataframe = dataframe.reindex(network.snapshots, fill_value=attr_series["default"]) if not attr_series.static: pnl[attr] = pnl[attr].reindex(columns=df.index|columns, fill_value=attr_series.default) else: pnl[attr] = pnl[attr].reindex(columns=(pnl[attr].columns | columns)) pnl[attr].loc[network.snapshots, columns] = dataframe.loc[network.snapshots, columns]
[ "def", "import_series_from_dataframe", "(", "network", ",", "dataframe", ",", "cls_name", ",", "attr", ")", ":", "df", "=", "network", ".", "df", "(", "cls_name", ")", "pnl", "=", "network", ".", "pnl", "(", "cls_name", ")", "list_name", "=", "network", ...
Import time series from a pandas DataFrame. Parameters ---------- dataframe : pandas.DataFrame cls_name : string Name of class of component attr : string Name of series attribute Examples -------- >>> import_series_from_dataframe(dataframe,"Load","p_set")
[ "Import", "time", "series", "from", "a", "pandas", "DataFrame", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/io.py#L690-L728
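A sketch of attaching a time series with import_series_from_dataframe: the DataFrame must be indexed by (a superset of) network.snapshots and its columns must name existing components, otherwise the warnings above fire. All names here are illustrative:

import pandas as pd
import pypsa

network = pypsa.Network()
snapshots = pd.date_range("2020-01-01", periods=4, freq="H")
network.set_snapshots(snapshots)
network.madd("Bus", ["bus 1", "bus 2"])
network.madd("Load", ["load 1", "load 2"], bus=["bus 1", "bus 2"])

p_set = pd.DataFrame({"load 1": [1., 2., 3., 4.],
                      "load 2": [4., 3., 2., 1.]}, index=snapshots)
network.import_series_from_dataframe(p_set, "Load", "p_set")
print(network.loads_t.p_set)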
224,655
PyPSA/PyPSA
pypsa/graph.py
graph
def graph(network, branch_components=None, weight=None, inf_weight=False): """ Build NetworkX graph. Arguments --------- network : Network|SubNetwork branch_components : [str] Components to use as branches. The default are passive_branch_components in the case of a SubNetwork and branch_components in the case of a Network. weight : str Branch attribute to use as weight inf_weight : bool|float How to treat infinite weights (default: False). True keeps the infinite weight. False skips edges with infinite weight. If a float is given it is used instead. Returns ------- graph : OrderedGraph NetworkX graph """ from . import components if isinstance(network, components.Network): if branch_components is None: branch_components = network.branch_components buses_i = network.buses.index elif isinstance(network, components.SubNetwork): if branch_components is None: branch_components = network.network.passive_branch_components buses_i = network.buses_i() else: raise TypeError("graph must be called with a Network or a SubNetwork") graph = OrderedGraph() # add nodes first, in case there are isolated buses not connected with branches graph.add_nodes_from(buses_i) # Multigraph uses the branch type and name as key def gen_edges(): for c in network.iterate_components(branch_components): for branch in c.df.loc[slice(None) if c.ind is None else c.ind].itertuples(): if weight is None: data = {} else: data = dict(weight=getattr(branch, weight, 0)) if np.isinf(data['weight']) and inf_weight is not True: if inf_weight is False: continue else: data['weight'] = inf_weight yield (branch.bus0, branch.bus1, (c.name, branch.Index), data) graph.add_edges_from(gen_edges()) return graph
python
def graph(network, branch_components=None, weight=None, inf_weight=False): from . import components if isinstance(network, components.Network): if branch_components is None: branch_components = network.branch_components buses_i = network.buses.index elif isinstance(network, components.SubNetwork): if branch_components is None: branch_components = network.network.passive_branch_components buses_i = network.buses_i() else: raise TypeError("graph must be called with a Network or a SubNetwork") graph = OrderedGraph() # add nodes first, in case there are isolated buses not connected with branches graph.add_nodes_from(buses_i) # Multigraph uses the branch type and name as key def gen_edges(): for c in network.iterate_components(branch_components): for branch in c.df.loc[slice(None) if c.ind is None else c.ind].itertuples(): if weight is None: data = {} else: data = dict(weight=getattr(branch, weight, 0)) if np.isinf(data['weight']) and inf_weight is not True: if inf_weight is False: continue else: data['weight'] = inf_weight yield (branch.bus0, branch.bus1, (c.name, branch.Index), data) graph.add_edges_from(gen_edges()) return graph
[ "def", "graph", "(", "network", ",", "branch_components", "=", "None", ",", "weight", "=", "None", ",", "inf_weight", "=", "False", ")", ":", "from", ".", "import", "components", "if", "isinstance", "(", "network", ",", "components", ".", "Network", ")", ...
Build NetworkX graph. Arguments --------- network : Network|SubNetwork branch_components : [str] Components to use as branches. The default are passive_branch_components in the case of a SubNetwork and branch_components in the case of a Network. weight : str Branch attribute to use as weight inf_weight : bool|float How to treat infinite weights (default: False). True keeps the infinite weight. False skips edges with infinite weight. If a float is given it is used instead. Returns ------- graph : OrderedGraph NetworkX graph
[ "Build", "NetworkX", "graph", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/graph.py#L26-L90
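A sketch of the graph builder, assuming (as in PyPSA) it is bound to Network as network.graph: with weight="x" each edge carries the line reactance, and edges are keyed by (component, name) since the result is a multigraph. Note that nodes are added before edges, so isolated buses appear too:

import pypsa

network = pypsa.Network()
network.madd("Bus", ["a", "b", "c", "isolated"])
network.add("Line", "ab", bus0="a", bus1="b", x=0.1)
network.add("Line", "bc", bus0="b", bus1="c", x=0.2)

g = network.graph(weight="x")
print(list(g.nodes()))                      # includes the isolated bus
print(list(g.edges(keys=True, data=True)))  # keys like ("Line", "ab")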
224,656
PyPSA/PyPSA
pypsa/opt.py
l_constraint
def l_constraint(model,name,constraints,*args): """A replacement for pyomo's Constraint that quickly builds linear constraints. Instead of model.name = Constraint(index1,index2,...,rule=f) call instead l_constraint(model,name,constraints,index1,index2,...) where constraints is a dictionary of constraints of the form: constraints[i] = LConstraint object OR using the soon-to-be-deprecated list format: constraints[i] = [[(coeff1,var1),(coeff2,var2),...],sense,constant_term] i.e. the first argument is a list of tuples with the variables and their coefficients, the second argument is the sense string (must be one of "==","<=",">=","><") and the third argument is the constant term (a float). The sense "><" allows lower and upper bounds and requires `constant_term` to be a 2-tuple. Variables may be repeated with different coefficients, which pyomo will sum up. Parameters ---------- model : pyomo.environ.ConcreteModel name : string Name of constraints to be constructed constraints : dict A dictionary of constraints (see format above) *args : Indices of the constraints """ setattr(model,name,Constraint(*args,noruleinit=True)) v = getattr(model,name) for i in v._index: c = constraints[i] if type(c) is LConstraint: variables = c.lhs.variables + [(-item[0],item[1]) for item in c.rhs.variables] sense = c.sense constant = c.rhs.constant - c.lhs.constant else: variables = c[0] sense = c[1] constant = c[2] v._data[i] = pyomo.core.base.constraint._GeneralConstraintData(None,v) v._data[i]._body = _build_sum_expression(variables) if sense == "==": v._data[i]._equality = True v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant) v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant) elif sense == "<=": v._data[i]._equality = False v._data[i]._lower = None v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant) elif sense == ">=": v._data[i]._equality = False v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant) v._data[i]._upper = None elif sense == "><": v._data[i]._equality = False v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant[0]) v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant[1]) else: raise KeyError('`sense` must be one of "==","<=",">=","><"; got: {}'.format(sense))
python
def l_constraint(model,name,constraints,*args): setattr(model,name,Constraint(*args,noruleinit=True)) v = getattr(model,name) for i in v._index: c = constraints[i] if type(c) is LConstraint: variables = c.lhs.variables + [(-item[0],item[1]) for item in c.rhs.variables] sense = c.sense constant = c.rhs.constant - c.lhs.constant else: variables = c[0] sense = c[1] constant = c[2] v._data[i] = pyomo.core.base.constraint._GeneralConstraintData(None,v) v._data[i]._body = _build_sum_expression(variables) if sense == "==": v._data[i]._equality = True v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant) v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant) elif sense == "<=": v._data[i]._equality = False v._data[i]._lower = None v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant) elif sense == ">=": v._data[i]._equality = False v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant) v._data[i]._upper = None elif sense == "><": v._data[i]._equality = False v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant[0]) v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant[1]) else: raise KeyError('`sense` must be one of "==","<=",">=","><"; got: {}'.format(sense))
[ "def", "l_constraint", "(", "model", ",", "name", ",", "constraints", ",", "*", "args", ")", ":", "setattr", "(", "model", ",", "name", ",", "Constraint", "(", "*", "args", ",", "noruleinit", "=", "True", ")", ")", "v", "=", "getattr", "(", "model", ...
A replacement for pyomo's Constraint that quickly builds linear constraints. Instead of model.name = Constraint(index1,index2,...,rule=f) call instead l_constraint(model,name,constraints,index1,index2,...) where constraints is a dictionary of constraints of the form: constraints[i] = LConstraint object OR using the soon-to-be-deprecated list format: constraints[i] = [[(coeff1,var1),(coeff2,var2),...],sense,constant_term] i.e. the first argument is a list of tuples with the variables and their coefficients, the second argument is the sense string (must be one of "==","<=",">=","><") and the third argument is the constant term (a float). The sense "><" allows lower and upper bounds and requires `constant_term` to be a 2-tuple. Variables may be repeated with different coefficients, which pyomo will sum up. Parameters ---------- model : pyomo.environ.ConcreteModel name : string Name of constraints to be constructed constraints : dict A dictionary of constraints (see format above) *args : Indices of the constraints
[ "A", "replacement", "for", "pyomo", "s", "Constraint", "that", "quickly", "builds", "linear", "constraints", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/opt.py#L168-L241
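A minimal l_constraint sketch using the list format documented above, built without a pyomo rule function (the point of the fast path); it assumes a pyomo version compatible with the code above. The constraint is x_i + 2*y_i <= 10 for each index i:

from pyomo.environ import ConcreteModel, Var
from pypsa.opt import l_constraint

model = ConcreteModel()
index = [0, 1]
model.x = Var(index)
model.y = Var(index)

# constraints[i] = [[(coeff, var), ...], sense, constant]
constraints = {i: [[(1., model.x[i]), (2., model.y[i])], "<=", 10.]
               for i in index}
l_constraint(model, "my_limit", constraints, index)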
224,657
PyPSA/PyPSA
pypsa/opt.py
l_objective
def l_objective(model,objective=None): """ A replacement for pyomo's Objective that quickly builds linear objectives. Instead of model.objective = Objective(expr=sum(vars[i]*coeffs[i] for i in index)+constant) call instead l_objective(model,objective) where objective is an LExpression. Variables may be repeated with different coefficients, which pyomo will sum up. Parameters ---------- model : pyomo.environ.ConcreteModel objective : LExpression """ if objective is None: objective = LExpression() #initialise with a dummy model.objective = Objective(expr = 0.) model.objective._expr = _build_sum_expression(objective.variables, constant=objective.constant)
python
def l_objective(model,objective=None): if objective is None: objective = LExpression() #initialise with a dummy model.objective = Objective(expr = 0.) model.objective._expr = _build_sum_expression(objective.variables, constant=objective.constant)
[ "def", "l_objective", "(", "model", ",", "objective", "=", "None", ")", ":", "if", "objective", "is", "None", ":", "objective", "=", "LExpression", "(", ")", "#initialise with a dummy", "model", ".", "objective", "=", "Objective", "(", "expr", "=", "0.", "...
A replacement for pyomo's Objective that quickly builds linear objectives. Instead of model.objective = Objective(expr=sum(vars[i]*coeffs[i] for i in index)+constant) call instead l_objective(model,objective) where objective is an LExpression. Variables may be repeated with different coefficients, which pyomo will sum up. Parameters ---------- model : pyomo.environ.ConcreteModel objective : LExpression
[ "A", "replacement", "for", "pyomo", "s", "Objective", "that", "quickly", "builds", "linear", "objectives", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/opt.py#L243-L274
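A self-contained l_objective sketch, minimising 3*x + y + 5 under pyomo's default minimisation sense; it assumes LExpression takes a list of (coefficient, variable) tuples plus a constant, matching its use alongside l_constraint above:

from pyomo.environ import ConcreteModel, Var
from pypsa.opt import LExpression, l_objective

model = ConcreteModel()
model.x = Var()
model.y = Var()
l_objective(model, LExpression([(3., model.x), (1., model.y)], 5.))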
224,658
PyPSA/PyPSA
pypsa/components.py
Network._build_dataframes
def _build_dataframes(self): """Function called when network is created to build component pandas.DataFrames.""" for component in self.all_components: attrs = self.components[component]["attrs"] static_dtypes = attrs.loc[attrs.static, "dtype"].drop(["name"]) df = pd.DataFrame({k: pd.Series(dtype=d) for k, d in static_dtypes.iteritems()}, columns=static_dtypes.index) df.index.name = "name" setattr(self,self.components[component]["list_name"],df) pnl = Dict({k : pd.DataFrame(index=self.snapshots, columns=[], #it's currently hard to imagine non-float series, but this could be generalised dtype=np.dtype(float)) for k in attrs.index[attrs.varying]}) setattr(self,self.components[component]["list_name"]+"_t",pnl)
python
def _build_dataframes(self): for component in self.all_components: attrs = self.components[component]["attrs"] static_dtypes = attrs.loc[attrs.static, "dtype"].drop(["name"]) df = pd.DataFrame({k: pd.Series(dtype=d) for k, d in static_dtypes.iteritems()}, columns=static_dtypes.index) df.index.name = "name" setattr(self,self.components[component]["list_name"],df) pnl = Dict({k : pd.DataFrame(index=self.snapshots, columns=[], #it's currently hard to imagine non-float series, but this could be generalised dtype=np.dtype(float)) for k in attrs.index[attrs.varying]}) setattr(self,self.components[component]["list_name"]+"_t",pnl)
[ "def", "_build_dataframes", "(", "self", ")", ":", "for", "component", "in", "self", ".", "all_components", ":", "attrs", "=", "self", ".", "components", "[", "component", "]", "[", "\"attrs\"", "]", "static_dtypes", "=", "attrs", ".", "loc", "[", "attrs",...
Function called when network is created to build component pandas.DataFrames.
[ "Function", "called", "when", "network", "is", "created", "to", "build", "component", "pandas", ".", "DataFrames", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L305-L327
224,659
PyPSA/PyPSA
pypsa/components.py
Network.set_snapshots
def set_snapshots(self,snapshots): """ Set the snapshots and reindex all time-dependent data. This will reindex all pandas.Panels of time-dependent data; NaNs are filled with the default value for that quantity. Parameters ---------- snapshots : list or pandas.Index All time steps. Returns ------- None """ self.snapshots = pd.Index(snapshots) self.snapshot_weightings = self.snapshot_weightings.reindex(self.snapshots,fill_value=1.) if isinstance(snapshots, pd.DatetimeIndex) and _pd_version < '0.18.0': snapshots = pd.Index(snapshots.values) for component in self.all_components: pnl = self.pnl(component) attrs = self.components[component]["attrs"] for k,default in attrs.default[attrs.varying].iteritems(): pnl[k] = pnl[k].reindex(self.snapshots).fillna(default)
python
def set_snapshots(self,snapshots): self.snapshots = pd.Index(snapshots) self.snapshot_weightings = self.snapshot_weightings.reindex(self.snapshots,fill_value=1.) if isinstance(snapshots, pd.DatetimeIndex) and _pd_version < '0.18.0': snapshots = pd.Index(snapshots.values) for component in self.all_components: pnl = self.pnl(component) attrs = self.components[component]["attrs"] for k,default in attrs.default[attrs.varying].iteritems(): pnl[k] = pnl[k].reindex(self.snapshots).fillna(default)
[ "def", "set_snapshots", "(", "self", ",", "snapshots", ")", ":", "self", ".", "snapshots", "=", "pd", ".", "Index", "(", "snapshots", ")", "self", ".", "snapshot_weightings", "=", "self", ".", "snapshot_weightings", ".", "reindex", "(", "self", ".", "snaps...
Set the snapshots and reindex all time-dependent data. This will reindex all pandas.Panels of time-dependent data; NaNs are filled with the default value for that quantity. Parameters ---------- snapshots : list or pandas.Index All time steps. Returns ------- None
[ "Set", "the", "snapshots", "and", "reindex", "all", "time", "-", "dependent", "data", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L378-L406
224,660
PyPSA/PyPSA
pypsa/components.py
Network.add
def add(self, class_name, name, **kwargs): """ Add a single component to the network. Adds it to component DataFrames. Parameters ---------- class_name : string Component class name in ["Bus","Generator","Load","StorageUnit","Store","ShuntImpedance","Line","Transformer","Link"] name : string Component name kwargs Component attributes, e.g. x=0.1, length=123 Examples -------- >>> network.add("Line", "line 12345", x=0.1) """ assert class_name in self.components, "Component class {} not found".format(class_name) cls_df = self.df(class_name) cls_pnl = self.pnl(class_name) name = str(name) assert name not in cls_df.index, "Failed to add {} component {} because there is already an object with this name in {}".format(class_name, name, self.components[class_name]["list_name"]) attrs = self.components[class_name]["attrs"] static_attrs = attrs[attrs.static].drop("name") #This guarantees that the correct attribute type is maintained obj_df = pd.DataFrame(data=[static_attrs.default],index=[name],columns=static_attrs.index) new_df = cls_df.append(obj_df, sort=False) setattr(self, self.components[class_name]["list_name"], new_df) for k,v in iteritems(kwargs): if k not in attrs.index: logger.warning("{} has no attribute {}, ignoring this passed value.".format(class_name,k)) continue typ = attrs.at[k, "typ"] if not attrs.at[k,"varying"]: new_df.at[name,k] = typ(v) elif attrs.at[k,"static"] and not isinstance(v, (pd.Series, np.ndarray, list)): new_df.at[name,k] = typ(v) else: cls_pnl[k][name] = pd.Series(data=v, index=self.snapshots, dtype=typ) for attr in ["bus","bus0","bus1"]: if attr in new_df.columns: bus_name = new_df.at[name,attr] if bus_name not in self.buses.index: logger.warning("The bus name `{}` given for {} of {} `{}` does not appear in network.buses".format(bus_name,attr,class_name,name))
python
def add(self, class_name, name, **kwargs): assert class_name in self.components, "Component class {} not found".format(class_name) cls_df = self.df(class_name) cls_pnl = self.pnl(class_name) name = str(name) assert name not in cls_df.index, "Failed to add {} component {} because there is already an object with this name in {}".format(class_name, name, self.components[class_name]["list_name"]) attrs = self.components[class_name]["attrs"] static_attrs = attrs[attrs.static].drop("name") #This guarantees that the correct attribute type is maintained obj_df = pd.DataFrame(data=[static_attrs.default],index=[name],columns=static_attrs.index) new_df = cls_df.append(obj_df, sort=False) setattr(self, self.components[class_name]["list_name"], new_df) for k,v in iteritems(kwargs): if k not in attrs.index: logger.warning("{} has no attribute {}, ignoring this passed value.".format(class_name,k)) continue typ = attrs.at[k, "typ"] if not attrs.at[k,"varying"]: new_df.at[name,k] = typ(v) elif attrs.at[k,"static"] and not isinstance(v, (pd.Series, np.ndarray, list)): new_df.at[name,k] = typ(v) else: cls_pnl[k][name] = pd.Series(data=v, index=self.snapshots, dtype=typ) for attr in ["bus","bus0","bus1"]: if attr in new_df.columns: bus_name = new_df.at[name,attr] if bus_name not in self.buses.index: logger.warning("The bus name `{}` given for {} of {} `{}` does not appear in network.buses".format(bus_name,attr,class_name,name))
[ "def", "add", "(", "self", ",", "class_name", ",", "name", ",", "*", "*", "kwargs", ")", ":", "assert", "class_name", "in", "self", ".", "components", ",", "\"Component class {} not found\"", ".", "format", "(", "class_name", ")", "cls_df", "=", "self", "....
Add a single component to the network. Adds it to component DataFrames. Parameters ---------- class_name : string Component class name in ["Bus","Generator","Load","StorageUnit","Store","ShuntImpedance","Line","Transformer","Link"] name : string Component name kwargs Component attributes, e.g. x=0.1, length=123 Examples -------- >>> network.add("Line", "line 12345", x=0.1)
[ "Add", "a", "single", "component", "to", "the", "network", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L412-L469
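A sketch of the type coercion in add(): scalar values of static-or-varying attributes land in the static DataFrame, while list-like values become per-snapshot series, exactly as the branching above distinguishes:

import pypsa

network = pypsa.Network()
network.set_snapshots(range(3))
network.add("Bus", "bus 1", v_nom=380.)
network.add("Load", "static load", bus="bus 1", p_set=5.)             # scalar: static
network.add("Load", "varying load", bus="bus 1", p_set=[1., 2., 3.])  # list: series
print(network.loads.p_set)    # 5.0 for "static load", default 0.0 for the other
print(network.loads_t.p_set)  # one column for "varying load", one row per snapshot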
224,661
PyPSA/PyPSA
pypsa/components.py
Network.remove
def remove(self, class_name, name): """ Removes a single component from the network. Removes it from component DataFrames. Parameters ---------- class_name : string Component class name name : string Component name Examples -------- >>> network.remove("Line","my_line 12345") """ if class_name not in self.components: logger.error("Component class {} not found".format(class_name)) return None cls_df = self.df(class_name) cls_df.drop(name, inplace=True) pnl = self.pnl(class_name) for df in itervalues(pnl): if name in df: df.drop(name, axis=1, inplace=True)
python
def remove(self, class_name, name): if class_name not in self.components: logger.error("Component class {} not found".format(class_name)) return None cls_df = self.df(class_name) cls_df.drop(name, inplace=True) pnl = self.pnl(class_name) for df in itervalues(pnl): if name in df: df.drop(name, axis=1, inplace=True)
[ "def", "remove", "(", "self", ",", "class_name", ",", "name", ")", ":", "if", "class_name", "not", "in", "self", ".", "components", ":", "logger", ".", "error", "(", "\"Component class {} not found\"", ".", "format", "(", "class_name", ")", ")", "return", ...
Removes a single component from the network. Removes it from component DataFrames. Parameters ---------- class_name : string Component class name name : string Component name Examples -------- >>> network.remove("Line","my_line 12345")
[ "Removes", "a", "single", "component", "from", "the", "network", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L472-L503
224,662
PyPSA/PyPSA
pypsa/components.py
Network.madd
def madd(self, class_name, names, suffix='', **kwargs): """ Add multiple components to the network, along with their attributes. Make sure when adding static attributes as pandas Series that they are indexed by names. Make sure when adding time-varying attributes as pandas DataFrames that their index is a superset of network.snapshots and their columns are a subset of names. Parameters ---------- class_name : string Component class name in ["Bus","Generator","Load","StorageUnit","Store","ShuntImpedance","Line","Transformer","Link"] names : list-like or pandas.Index Component names suffix : string, default '' All components are named after names with this added suffix. It is assumed that all Series and DataFrames are indexed by the original names. kwargs Component attributes, e.g. x=[0.1,0.2], can be list, pandas.Series or pandas.DataFrame for time-varying Returns -------- new_names : pandas.Index Names of new components (including suffix) Examples -------- >>> network.madd("Load", ["load 1", "load 2"], bus=["1","2"], p_set=np.random.rand(len(network.snapshots),2)) """ if class_name not in self.components: logger.error("Component class {} not found".format(class_name)) return None if not isinstance(names, pd.Index): names = pd.Index(names) new_names = names.astype(str) + suffix static = {}; series = {} for k, v in iteritems(kwargs): if isinstance(v, pd.DataFrame): series[k] = v.rename(columns=lambda i: str(i)+suffix) elif isinstance(v, pd.Series): static[k] = v.rename(lambda i: str(i)+suffix) elif isinstance(v, np.ndarray) and v.shape == (len(self.snapshots), len(names)): series[k] = pd.DataFrame(v, index=self.snapshots, columns=new_names) else: static[k] = v self.import_components_from_dataframe(pd.DataFrame(static, index=new_names), class_name) for k, v in iteritems(series): self.import_series_from_dataframe(v, class_name, k) return new_names
python
def madd(self, class_name, names, suffix='', **kwargs): if class_name not in self.components: logger.error("Component class {} not found".format(class_name)) return None if not isinstance(names, pd.Index): names = pd.Index(names) new_names = names.astype(str) + suffix static = {}; series = {} for k, v in iteritems(kwargs): if isinstance(v, pd.DataFrame): series[k] = v.rename(columns=lambda i: str(i)+suffix) elif isinstance(v, pd.Series): static[k] = v.rename(lambda i: str(i)+suffix) elif isinstance(v, np.ndarray) and v.shape == (len(self.snapshots), len(names)): series[k] = pd.DataFrame(v, index=self.snapshots, columns=new_names) else: static[k] = v self.import_components_from_dataframe(pd.DataFrame(static, index=new_names), class_name) for k, v in iteritems(series): self.import_series_from_dataframe(v, class_name, k) return new_names
[ "def", "madd", "(", "self", ",", "class_name", ",", "names", ",", "suffix", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "class_name", "not", "in", "self", ".", "components", ":", "logger", ".", "error", "(", "\"Component class {} not found\"", "....
Add multiple components to the network, along with their attributes. Make sure when adding static attributes as pandas Series that they are indexed by names. Make sure when adding time-varying attributes as pandas DataFrames that their index is a superset of network.snapshots and their columns are a subset of names. Parameters ---------- class_name : string Component class name in ["Bus","Generator","Load","StorageUnit","Store","ShuntImpedance","Line","Transformer","Link"] names : list-like or pandas.Index Component names suffix : string, default '' All components are named after names with this added suffix. It is assumed that all Series and DataFrames are indexed by the original names. kwargs Component attributes, e.g. x=[0.1,0.2], can be list, pandas.Series or pandas.DataFrame for time-varying Returns -------- new_names : pandas.Index Names of new components (including suffix) Examples -------- >>> network.madd("Load", ["load 1", "load 2"], bus=["1","2"], p_set=np.random.rand(len(network.snapshots),2))
[ "Add", "multiple", "components", "to", "the", "network", "along", "with", "their", "attributes", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L507-L564
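madd() with a 2-D numpy array, as in the docstring example: an array of shape (len(snapshots), len(names)) is converted into a time-varying DataFrame automatically, while everything else is treated as static:

import numpy as np
import pypsa

network = pypsa.Network()
network.set_snapshots(range(4))
network.madd("Bus", ["1", "2"])
network.madd("Load", ["load 1", "load 2"], bus=["1", "2"],
             p_set=np.random.rand(4, 2))
print(network.loads_t.p_set.shape)   # (4, 2): four snapshots, two loads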
224,663
PyPSA/PyPSA
pypsa/components.py
Network.mremove
def mremove(self, class_name, names): """ Removes multiple components from the network. Removes them from component DataFrames. Parameters ---------- class_name : string Component class name names : list-like Component names Examples -------- >>> network.mremove("Line", ["line x", "line y"]) """ if class_name not in self.components: logger.error("Component class {} not found".format(class_name)) return None if not isinstance(names, pd.Index): names = pd.Index(names) cls_df = self.df(class_name) cls_df.drop(names, inplace=True) pnl = self.pnl(class_name) for df in itervalues(pnl): df.drop(df.columns.intersection(names), axis=1, inplace=True)
python
def mremove(self, class_name, names): if class_name not in self.components: logger.error("Component class {} not found".format(class_name)) return None if not isinstance(names, pd.Index): names = pd.Index(names) cls_df = self.df(class_name) cls_df.drop(names, inplace=True) pnl = self.pnl(class_name) for df in itervalues(pnl): df.drop(df.columns.intersection(names), axis=1, inplace=True)
[ "def", "mremove", "(", "self", ",", "class_name", ",", "names", ")", ":", "if", "class_name", "not", "in", "self", ".", "components", ":", "logger", ".", "error", "(", "\"Component class {} not found\"", ".", "format", "(", "class_name", ")", ")", "return", ...
Removes multiple components from the network. Removes them from component DataFrames. Parameters ---------- class_name : string Component class name names : list-like Component names Examples -------- >>> network.mremove("Line", ["line x", "line y"])
[ "Removes", "multiple", "components", "from", "the", "network", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L567-L600
224,664
PyPSA/PyPSA
pypsa/components.py
Network.copy
def copy(self, with_time=True, ignore_standard_types=False): """ Returns a deep copy of the Network object with all components and time-dependent data. Returns -------- network : pypsa.Network Parameters ---------- with_time : boolean, default True Copy snapshots and time-varying network.component_names_t data too. ignore_standard_types : boolean, default False Ignore the PyPSA standard types. Examples -------- >>> network_copy = network.copy() """ override_components, override_component_attrs = self._retrieve_overridden_components() network = self.__class__(ignore_standard_types=ignore_standard_types, override_components=override_components, override_component_attrs=override_component_attrs) for component in self.iterate_components(["Bus", "Carrier"] + sorted(self.all_components - {"Bus","Carrier"})): df = component.df #drop the standard types to avoid them being read in twice if not ignore_standard_types and component.name in self.standard_type_components: df = component.df.drop(network.components[component.name]["standard_types"].index) import_components_from_dataframe(network, df, component.name) if with_time: network.set_snapshots(self.snapshots) for component in self.iterate_components(): pnl = getattr(network, component.list_name+"_t") for k in iterkeys(component.pnl): pnl[k] = component.pnl[k].copy() #catch all remaining attributes of network for attr in ["name", "srid"]: setattr(network,attr,getattr(self,attr)) network.snapshot_weightings = self.snapshot_weightings.copy() return network
python
def copy(self, with_time=True, ignore_standard_types=False): override_components, override_component_attrs = self._retrieve_overridden_components() network = self.__class__(ignore_standard_types=ignore_standard_types, override_components=override_components, override_component_attrs=override_component_attrs) for component in self.iterate_components(["Bus", "Carrier"] + sorted(self.all_components - {"Bus","Carrier"})): df = component.df #drop the standard types to avoid them being read in twice if not ignore_standard_types and component.name in self.standard_type_components: df = component.df.drop(network.components[component.name]["standard_types"].index) import_components_from_dataframe(network, df, component.name) if with_time: network.set_snapshots(self.snapshots) for component in self.iterate_components(): pnl = getattr(network, component.list_name+"_t") for k in iterkeys(component.pnl): pnl[k] = component.pnl[k].copy() #catch all remaining attributes of network for attr in ["name", "srid"]: setattr(network,attr,getattr(self,attr)) network.snapshot_weightings = self.snapshot_weightings.copy() return network
[ "def", "copy", "(", "self", ",", "with_time", "=", "True", ",", "ignore_standard_types", "=", "False", ")", ":", "override_components", ",", "override_component_attrs", "=", "self", ".", "_retrieve_overridden_components", "(", ")", "network", "=", "self", ".", "...
Returns a deep copy of the Network object with all components and time-dependent data. Returns -------- network : pypsa.Network Parameters ---------- with_time : boolean, default True Copy snapshots and time-varying network.component_names_t data too. ignore_standard_types : boolean, default False Ignore the PyPSA standard types. Examples -------- >>> network_copy = network.copy()
[ "Returns", "a", "deep", "copy", "of", "the", "Network", "object", "with", "all", "components", "and", "time", "-", "dependent", "data", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L618-L667
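A short sketch of the copy semantics documented above (assumes PyPSA; the attribute value is only illustrative):

```python
import pypsa

n = pypsa.Network()
n.set_snapshots([0, 1, 2])
n.add("Bus", "b")

m = n.copy()                        # deep copy, snapshots included
m_static = n.copy(with_time=False)  # static component data only

# The copy is deep: mutating it leaves the original untouched.
m.buses.at["b", "v_nom"] = 380.0
assert n.buses.at["b", "v_nom"] != 380.0
```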
224,665
PyPSA/PyPSA
pypsa/components.py
Network.determine_network_topology
def determine_network_topology(self): """ Build sub_networks from topology. """ adjacency_matrix = self.adjacency_matrix(self.passive_branch_components) n_components, labels = csgraph.connected_components(adjacency_matrix, directed=False) # remove all old sub_networks for sub_network in self.sub_networks.index: obj = self.sub_networks.at[sub_network,"obj"] self.remove("SubNetwork", sub_network) del obj for i in np.arange(n_components): # index of first bus buses_i = (labels == i).nonzero()[0] carrier = self.buses.carrier.iat[buses_i[0]] if carrier not in ["AC","DC"] and len(buses_i) > 1: logger.warning("Warning, sub network {} is not electric but contains multiple buses\n" "and branches. Passive flows are not allowed for non-electric networks!".format(i)) if (self.buses.carrier.iloc[buses_i] != carrier).any(): logger.warning("Warning, sub network {} contains buses with mixed carriers! Value counts:\n{}".format(i, self.buses.carrier.iloc[buses_i].value_counts())) self.add("SubNetwork", i, carrier=carrier) #add objects self.sub_networks["obj"] = [SubNetwork(self, name) for name in self.sub_networks.index] self.buses.loc[:, "sub_network"] = labels.astype(str) for c in self.iterate_components(self.passive_branch_components): c.df["sub_network"] = c.df.bus0.map(self.buses["sub_network"])
python
def determine_network_topology(self): adjacency_matrix = self.adjacency_matrix(self.passive_branch_components) n_components, labels = csgraph.connected_components(adjacency_matrix, directed=False) # remove all old sub_networks for sub_network in self.sub_networks.index: obj = self.sub_networks.at[sub_network,"obj"] self.remove("SubNetwork", sub_network) del obj for i in np.arange(n_components): # index of first bus buses_i = (labels == i).nonzero()[0] carrier = self.buses.carrier.iat[buses_i[0]] if carrier not in ["AC","DC"] and len(buses_i) > 1: logger.warning("Warning, sub network {} is not electric but contains multiple buses\n" "and branches. Passive flows are not allowed for non-electric networks!".format(i)) if (self.buses.carrier.iloc[buses_i] != carrier).any(): logger.warning("Warning, sub network {} contains buses with mixed carriers! Value counts:\n{}".format(i, self.buses.carrier.iloc[buses_i].value_counts())) self.add("SubNetwork", i, carrier=carrier) #add objects self.sub_networks["obj"] = [SubNetwork(self, name) for name in self.sub_networks.index] self.buses.loc[:, "sub_network"] = labels.astype(str) for c in self.iterate_components(self.passive_branch_components): c.df["sub_network"] = c.df.bus0.map(self.buses["sub_network"])
[ "def", "determine_network_topology", "(", "self", ")", ":", "adjacency_matrix", "=", "self", ".", "adjacency_matrix", "(", "self", ".", "passive_branch_components", ")", "n_components", ",", "labels", "=", "csgraph", ".", "connected_components", "(", "adjacency_matrix...
Build sub_networks from topology.
[ "Build", "sub_networks", "from", "topology", "." ]
46954b1b3c21460550f7104681517065279a53b7
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L759-L794
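The heart of determine_network_topology is a connected-components pass over a sparse adjacency matrix. A standalone sketch of that step, independent of PyPSA:

```python
import numpy as np
from scipy.sparse import csr_matrix, csgraph

# 4 buses, edges (0, 1) and (2, 3) -> two separate sub-networks
edges = [(0, 1), (2, 3)]
rows, cols = zip(*edges)
adj = csr_matrix((np.ones(len(edges)), (rows, cols)), shape=(4, 4))

n_comp, labels = csgraph.connected_components(adj, directed=False)
print(n_comp)   # 2
print(labels)   # [0 0 1 1] -- the per-bus sub_network labels
```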
224,666
seb-m/pyinotify
python3/pyinotify.py
logger_init
def logger_init(): """Initialize logger instance.""" log = logging.getLogger("pyinotify") console_handler = logging.StreamHandler() console_handler.setFormatter( logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s")) log.addHandler(console_handler) log.setLevel(20) return log
python
def logger_init(): log = logging.getLogger("pyinotify") console_handler = logging.StreamHandler() console_handler.setFormatter( logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s")) log.addHandler(console_handler) log.setLevel(20) return log
[ "def", "logger_init", "(", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "\"pyinotify\"", ")", "console_handler", "=", "logging", ".", "StreamHandler", "(", ")", "console_handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "\"[%(asc...
Initialize logger instance.
[ "Initialize", "logger", "instance", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L256-L264
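Note that setLevel(20) passes the numeric value of logging.INFO. An equivalent sketch using the named constant, which is self-documenting:

```python
import logging

log = logging.getLogger("pyinotify")
handler = logging.StreamHandler()
handler.setFormatter(
    logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
log.addHandler(handler)
log.setLevel(logging.INFO)  # logging.INFO == 20
```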
224,667
seb-m/pyinotify
python3/pyinotify.py
INotifyWrapper.create
def create(): """ Factory method instantiating and returning the right wrapper. """ # First, try to use ctypes. if ctypes: inotify = _CtypesLibcINotifyWrapper() if inotify.init(): return inotify # Second, see if C extension is compiled. if inotify_syscalls: inotify = _INotifySyscallsWrapper() if inotify.init(): return inotify
python
def create(): # First, try to use ctypes. if ctypes: inotify = _CtypesLibcINotifyWrapper() if inotify.init(): return inotify # Second, see if C extension is compiled. if inotify_syscalls: inotify = _INotifySyscallsWrapper() if inotify.init(): return inotify
[ "def", "create", "(", ")", ":", "# First, try to use ctypes.", "if", "ctypes", ":", "inotify", "=", "_CtypesLibcINotifyWrapper", "(", ")", "if", "inotify", ".", "init", "(", ")", ":", "return", "inotify", "# Second, see if C extension is compiled.", "if", "inotify_s...
Factory method instantiating and returning the right wrapper.
[ "Factory", "method", "instantiating", "and", "returning", "the", "right", "wrapper", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L119-L132
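The factory tries each backend in order of preference and returns the first whose init() succeeds. A generic sketch of that pattern with hypothetical stand-in classes:

```python
class _FastBackend:
    def init(self):
        return False        # pretend the preferred path is unavailable

class _FallbackBackend:
    def init(self):
        return True

def create_backend():
    for cls in (_FastBackend, _FallbackBackend):
        backend = cls()
        if backend.init():  # first backend that initialises wins
            return backend
    return None             # mirrors create() falling off the end

assert isinstance(create_backend(), _FallbackBackend)
```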
224,668
seb-m/pyinotify
python3/pyinotify.py
Stats.my_init
def my_init(self): """ Method automatically called from base class constructor. """ self._start_time = time.time() self._stats = {} self._stats_lock = threading.Lock()
python
def my_init(self): self._start_time = time.time() self._stats = {} self._stats_lock = threading.Lock()
[ "def", "my_init", "(", "self", ")", ":", "self", ".", "_start_time", "=", "time", ".", "time", "(", ")", "self", ".", "_stats", "=", "{", "}", "self", ".", "_stats_lock", "=", "threading", ".", "Lock", "(", ")" ]
Method automatically called from base class constructor.
[ "Method", "automatically", "called", "from", "base", "class", "constructor", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L990-L996
224,669
seb-m/pyinotify
python3/pyinotify.py
Stats.process_default
def process_default(self, event): """ Processes |event|. """ self._stats_lock.acquire() try: events = event.maskname.split('|') for event_name in events: count = self._stats.get(event_name, 0) self._stats[event_name] = count + 1 finally: self._stats_lock.release()
python
def process_default(self, event): self._stats_lock.acquire() try: events = event.maskname.split('|') for event_name in events: count = self._stats.get(event_name, 0) self._stats[event_name] = count + 1 finally: self._stats_lock.release()
[ "def", "process_default", "(", "self", ",", "event", ")", ":", "self", ".", "_stats_lock", ".", "acquire", "(", ")", "try", ":", "events", "=", "event", ".", "maskname", ".", "split", "(", "'|'", ")", "for", "event_name", "in", "events", ":", "count", ...
Processes |event|.
[ "Processes", "|event|", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L998-L1009
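The acquire/try/finally dance above can be written more compactly with a with block and a Counter; a sketch of the same tally, not a patch to the class:

```python
import threading
from collections import Counter

stats = Counter()
stats_lock = threading.Lock()

def record(maskname):
    with stats_lock:                 # released even if counting raises
        stats.update(maskname.split('|'))

record("IN_CREATE|IN_ISDIR")
assert stats["IN_CREATE"] == 1 and stats["IN_ISDIR"] == 1
```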
224,670
seb-m/pyinotify
python3/pyinotify.py
ThreadedNotifier.stop
def stop(self): """ Stop notifier's loop. Stop notification. Join the thread. """ self._stop_event.set() os.write(self._pipe[1], b'stop') threading.Thread.join(self) Notifier.stop(self) self._pollobj.unregister(self._pipe[0]) os.close(self._pipe[0]) os.close(self._pipe[1])
python
def stop(self): self._stop_event.set() os.write(self._pipe[1], b'stop') threading.Thread.join(self) Notifier.stop(self) self._pollobj.unregister(self._pipe[0]) os.close(self._pipe[0]) os.close(self._pipe[1])
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_stop_event", ".", "set", "(", ")", "os", ".", "write", "(", "self", ".", "_pipe", "[", "1", "]", ",", "b'stop'", ")", "threading", ".", "Thread", ".", "join", "(", "self", ")", "Notifier", ".",...
Stop notifier's loop. Stop notification. Join the thread.
[ "Stop", "notifier", "s", "loop", ".", "Stop", "notification", ".", "Join", "the", "thread", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L1455-L1465
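stop() relies on the classic self-pipe trick: writing one byte to a pipe registered with the poller wakes the blocked poll() immediately. A standalone POSIX sketch:

```python
import os
import select

r, w = os.pipe()
poller = select.poll()
poller.register(r, select.POLLIN)

os.write(w, b'stop')          # what stop() does from the calling thread
events = poller.poll(1000)    # returns at once instead of sleeping 1s
assert events and events[0][0] == r

os.close(r)
os.close(w)
```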
224,671
seb-m/pyinotify
python3/pyinotify.py
TornadoAsyncNotifier.handle_read
def handle_read(self, *args, **kwargs): """ See comment in AsyncNotifier. """ self.read_events() self.process_events() if self.handle_read_callback is not None: self.handle_read_callback(self)
python
def handle_read(self, *args, **kwargs): self.read_events() self.process_events() if self.handle_read_callback is not None: self.handle_read_callback(self)
[ "def", "handle_read", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "read_events", "(", ")", "self", ".", "process_events", "(", ")", "if", "self", ".", "handle_read_callback", "is", "not", "None", ":", "self", ".", "...
See comment in AsyncNotifier.
[ "See", "comment", "in", "AsyncNotifier", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L1557-L1565
224,672
seb-m/pyinotify
python3/pyinotify.py
WatchManager.del_watch
def del_watch(self, wd): """ Remove watch entry associated with watch descriptor wd. @param wd: Watch descriptor. @type wd: int """ try: del self._wmd[wd] except KeyError as err: log.error('Cannot delete unknown watch descriptor %s' % str(err))
python
def del_watch(self, wd): try: del self._wmd[wd] except KeyError as err: log.error('Cannot delete unknown watch descriptor %s' % str(err))
[ "def", "del_watch", "(", "self", ",", "wd", ")", ":", "try", ":", "del", "self", ".", "_wmd", "[", "wd", "]", "except", "KeyError", "as", "err", ":", "log", ".", "error", "(", "'Cannot delete unknown watch descriptor %s'", "%", "str", "(", "err", ")", ...
Remove watch entry associated with watch descriptor wd. @param wd: Watch descriptor. @type wd: int
[ "Remove", "watch", "entry", "associated", "with", "watch", "descriptor", "wd", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L1799-L1809
224,673
seb-m/pyinotify
python3/pyinotify.py
WatchManager.__add_watch
def __add_watch(self, path, mask, proc_fun, auto_add, exclude_filter): """ Add a watch on path, build a Watch object and insert it in the watch manager dictionary. Return the wd value. """ path = self.__format_path(path) if auto_add and not mask & IN_CREATE: mask |= IN_CREATE wd = self._inotify_wrapper.inotify_add_watch(self._fd, path, mask) if wd < 0: return wd watch = Watch(wd=wd, path=path, mask=mask, proc_fun=proc_fun, auto_add=auto_add, exclude_filter=exclude_filter) # wd are _always_ indexed with their original unicode paths in wmd. self._wmd[wd] = watch log.debug('New %s', watch) return wd
python
def __add_watch(self, path, mask, proc_fun, auto_add, exclude_filter): path = self.__format_path(path) if auto_add and not mask & IN_CREATE: mask |= IN_CREATE wd = self._inotify_wrapper.inotify_add_watch(self._fd, path, mask) if wd < 0: return wd watch = Watch(wd=wd, path=path, mask=mask, proc_fun=proc_fun, auto_add=auto_add, exclude_filter=exclude_filter) # wd are _always_ indexed with their original unicode paths in wmd. self._wmd[wd] = watch log.debug('New %s', watch) return wd
[ "def", "__add_watch", "(", "self", ",", "path", ",", "mask", ",", "proc_fun", ",", "auto_add", ",", "exclude_filter", ")", ":", "path", "=", "self", ".", "__format_path", "(", "path", ")", "if", "auto_add", "and", "not", "mask", "&", "IN_CREATE", ":", ...
Add a watch on path, build a Watch object and insert it in the watch manager dictionary. Return the wd value.
[ "Add", "a", "watch", "on", "path", "build", "a", "Watch", "object", "and", "insert", "it", "in", "the", "watch", "manager", "dictionary", ".", "Return", "the", "wd", "value", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L1828-L1844
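__add_watch is private; the public route to it is WatchManager.add_watch. A typical usage sketch (Linux only; the watched path is arbitrary):

```python
import pyinotify

wm = pyinotify.WatchManager()
mask = pyinotify.IN_CREATE | pyinotify.IN_DELETE

class Handler(pyinotify.ProcessEvent):
    def process_default(self, event):
        print(event.maskname, event.pathname)

notifier = pyinotify.Notifier(wm, Handler(), timeout=10)
# auto_add=True ORs IN_CREATE into the mask, exactly as __add_watch shows.
wm.add_watch('/tmp', mask, rec=True, auto_add=True)

# One polling pass: check, read, dispatch.
if notifier.check_events():
    notifier.read_events()
notifier.process_events()
```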
224,674
seb-m/pyinotify
python3/pyinotify.py
WatchManager.update_watch
def update_watch(self, wd, mask=None, proc_fun=None, rec=False, auto_add=False, quiet=True): """ Update existing watch descriptors |wd|. The |mask| value, the processing object |proc_fun|, the recursive param |rec| and the |auto_add| and |quiet| flags can all be updated. @param wd: Watch Descriptor to update. Also accepts a list of watch descriptors. @type wd: int or list of int @param mask: Optional new bitmask of events. @type mask: int @param proc_fun: Optional new processing function. @type proc_fun: function or ProcessEvent instance or instance of one of its subclasses or callable object. @param rec: Optionally adds watches recursively on all subdirectories contained into |wd| directory. @type rec: bool @param auto_add: Automatically adds watches on newly created directories in the watch's path corresponding to |wd|. If |auto_add| is True, IN_CREATE is ored with |mask| when the watch is updated. @type auto_add: bool @param quiet: If False raises a WatchManagerError exception on error. See example not_quiet.py @type quiet: bool @return: dict of watch descriptors associated to booleans values. True if the corresponding wd has been successfully updated, False otherwise. @rtype: dict of {int: bool} """ lwd = self.__format_param(wd) if rec: lwd = self.__get_sub_rec(lwd) ret_ = {} # return {wd: bool, ...} for awd in lwd: apath = self.get_path(awd) if not apath or awd < 0: err = 'update_watch: invalid WD=%d' % awd if quiet: log.error(err) continue raise WatchManagerError(err, ret_) if mask: wd_ = self._inotify_wrapper.inotify_add_watch(self._fd, apath, mask) if wd_ < 0: ret_[awd] = False err = ('update_watch: cannot update %s WD=%d, %s' % \ (apath, wd_, self._inotify_wrapper.str_errno())) if quiet: log.error(err) continue raise WatchManagerError(err, ret_) assert(awd == wd_) if proc_fun or auto_add: watch_ = self._wmd[awd] if proc_fun: watch_.proc_fun = proc_fun if auto_add: watch_.auto_add = auto_add ret_[awd] = True log.debug('Updated watch - %s', self._wmd[awd]) return ret_
python
def update_watch(self, wd, mask=None, proc_fun=None, rec=False, auto_add=False, quiet=True): lwd = self.__format_param(wd) if rec: lwd = self.__get_sub_rec(lwd) ret_ = {} # return {wd: bool, ...} for awd in lwd: apath = self.get_path(awd) if not apath or awd < 0: err = 'update_watch: invalid WD=%d' % awd if quiet: log.error(err) continue raise WatchManagerError(err, ret_) if mask: wd_ = self._inotify_wrapper.inotify_add_watch(self._fd, apath, mask) if wd_ < 0: ret_[awd] = False err = ('update_watch: cannot update %s WD=%d, %s' % \ (apath, wd_, self._inotify_wrapper.str_errno())) if quiet: log.error(err) continue raise WatchManagerError(err, ret_) assert(awd == wd_) if proc_fun or auto_add: watch_ = self._wmd[awd] if proc_fun: watch_.proc_fun = proc_fun if auto_add: watch_.auto_add = auto_add ret_[awd] = True log.debug('Updated watch - %s', self._wmd[awd]) return ret_
[ "def", "update_watch", "(", "self", ",", "wd", ",", "mask", "=", "None", ",", "proc_fun", "=", "None", ",", "rec", "=", "False", ",", "auto_add", "=", "False", ",", "quiet", "=", "True", ")", ":", "lwd", "=", "self", ".", "__format_param", "(", "wd...
Update existing watch descriptors |wd|. The |mask| value, the processing object |proc_fun|, the recursive param |rec| and the |auto_add| and |quiet| flags can all be updated. @param wd: Watch Descriptor to update. Also accepts a list of watch descriptors. @type wd: int or list of int @param mask: Optional new bitmask of events. @type mask: int @param proc_fun: Optional new processing function. @type proc_fun: function or ProcessEvent instance or instance of one of its subclasses or callable object. @param rec: Optionally adds watches recursively on all subdirectories contained into |wd| directory. @type rec: bool @param auto_add: Automatically adds watches on newly created directories in the watch's path corresponding to |wd|. If |auto_add| is True, IN_CREATE is ored with |mask| when the watch is updated. @type auto_add: bool @param quiet: If False raises a WatchManagerError exception on error. See example not_quiet.py @type quiet: bool @return: dict of watch descriptors associated to booleans values. True if the corresponding wd has been successfully updated, False otherwise. @rtype: dict of {int: bool}
[ "Update", "existing", "watch", "descriptors", "|wd|", ".", "The", "|mask|", "value", "the", "processing", "object", "|proc_fun|", "the", "recursive", "param", "|rec|", "and", "the", "|auto_add|", "and", "|quiet|", "flags", "can", "all", "be", "updated", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L1969-L2039
224,675
seb-m/pyinotify
python3/pyinotify.py
WatchManager.get_path
def get_path(self, wd): """ Returns the path associated with WD; if WD is unknown it returns None. @param wd: Watch descriptor. @type wd: int @return: Path or None. @rtype: string or None """ watch_ = self._wmd.get(wd) if watch_ is not None: return watch_.path
python
def get_path(self, wd): watch_ = self._wmd.get(wd) if watch_ is not None: return watch_.path
[ "def", "get_path", "(", "self", ",", "wd", ")", ":", "watch_", "=", "self", ".", "_wmd", ".", "get", "(", "wd", ")", "if", "watch_", "is", "not", "None", ":", "return", "watch_", ".", "path" ]
Returns the path associated with WD; if WD is unknown it returns None. @param wd: Watch descriptor. @type wd: int @return: Path or None. @rtype: string or None
[ "Returns", "the", "path", "associated", "with", "WD", "if", "WD", "is", "unknown", "it", "returns", "None", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L2070-L2081
224,676
seb-m/pyinotify
python3/pyinotify.py
WatchManager.__walk_rec
def __walk_rec(self, top, rec): """ Yields each subdirectory of top; doesn't follow symlinks. If rec is false, only yield top. @param top: root directory. @type top: string @param rec: recursive flag. @type rec: bool @return: path of one subdirectory. @rtype: string """ if not rec or os.path.islink(top) or not os.path.isdir(top): yield top else: for root, dirs, files in os.walk(top): yield root
python
def __walk_rec(self, top, rec): if not rec or os.path.islink(top) or not os.path.isdir(top): yield top else: for root, dirs, files in os.walk(top): yield root
[ "def", "__walk_rec", "(", "self", ",", "top", ",", "rec", ")", ":", "if", "not", "rec", "or", "os", ".", "path", ".", "islink", "(", "top", ")", "or", "not", "os", ".", "path", ".", "isdir", "(", "top", ")", ":", "yield", "top", "else", ":", ...
Yields each subdirectory of top; doesn't follow symlinks. If rec is false, only yield top. @param top: root directory. @type top: string @param rec: recursive flag. @type rec: bool @return: path of one subdirectory. @rtype: string
[ "Yields", "each", "subdirectory", "of", "top", "doesn", "t", "follow", "symlinks", ".", "If", "rec", "is", "false", "only", "yield", "top", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L2083-L2099
224,677
seb-m/pyinotify
python2/pyinotify.py
_SysProcessEvent.process_IN_CREATE
def process_IN_CREATE(self, raw_event): """ If the event affects a directory and the auto_add flag of the targeted watch is set to True, a new watch is added on this new directory, with the same attribute values as those of this watch. """ if raw_event.mask & IN_ISDIR: watch_ = self._watch_manager.get_watch(raw_event.wd) created_dir = os.path.join(watch_.path, raw_event.name) if watch_.auto_add and not watch_.exclude_filter(created_dir): addw = self._watch_manager.add_watch # The newly monitored directory inherits attributes from its # parent directory. addw_ret = addw(created_dir, watch_.mask, proc_fun=watch_.proc_fun, rec=False, auto_add=watch_.auto_add, exclude_filter=watch_.exclude_filter) # Trick to handle mkdir -p /d1/d2/t3 where d1 is watched and # d2 and t3 (directory or file) are created. # Since the directory d2 is new, then everything inside it must # also be new. created_dir_wd = addw_ret.get(created_dir) if ((created_dir_wd is not None) and (created_dir_wd > 0) and os.path.isdir(created_dir)): try: for name in os.listdir(created_dir): inner = os.path.join(created_dir, name) if self._watch_manager.get_wd(inner) is not None: continue # Generate (simulate) creation events for sub- # directories and files. if os.path.isfile(inner): # symlinks are handled as files. flags = IN_CREATE elif os.path.isdir(inner): flags = IN_CREATE | IN_ISDIR else: # This path should not be taken. continue rawevent = _RawEvent(created_dir_wd, flags, 0, name) self._notifier.append_event(rawevent) except OSError, err: msg = "process_IN_CREATE, invalid directory %s: %s" log.debug(msg % (created_dir, str(err))) return self.process_default(raw_event)
python
def process_IN_CREATE(self, raw_event): if raw_event.mask & IN_ISDIR: watch_ = self._watch_manager.get_watch(raw_event.wd) created_dir = os.path.join(watch_.path, raw_event.name) if watch_.auto_add and not watch_.exclude_filter(created_dir): addw = self._watch_manager.add_watch # The newly monitored directory inherits attributes from its # parent directory. addw_ret = addw(created_dir, watch_.mask, proc_fun=watch_.proc_fun, rec=False, auto_add=watch_.auto_add, exclude_filter=watch_.exclude_filter) # Trick to handle mkdir -p /d1/d2/t3 where d1 is watched and # d2 and t3 (directory or file) are created. # Since the directory d2 is new, then everything inside it must # also be new. created_dir_wd = addw_ret.get(created_dir) if ((created_dir_wd is not None) and (created_dir_wd > 0) and os.path.isdir(created_dir)): try: for name in os.listdir(created_dir): inner = os.path.join(created_dir, name) if self._watch_manager.get_wd(inner) is not None: continue # Generate (simulate) creation events for sub- # directories and files. if os.path.isfile(inner): # symlinks are handled as files. flags = IN_CREATE elif os.path.isdir(inner): flags = IN_CREATE | IN_ISDIR else: # This path should not be taken. continue rawevent = _RawEvent(created_dir_wd, flags, 0, name) self._notifier.append_event(rawevent) except OSError, err: msg = "process_IN_CREATE, invalid directory %s: %s" log.debug(msg % (created_dir, str(err))) return self.process_default(raw_event)
[ "def", "process_IN_CREATE", "(", "self", ",", "raw_event", ")", ":", "if", "raw_event", ".", "mask", "&", "IN_ISDIR", ":", "watch_", "=", "self", ".", "_watch_manager", ".", "get_watch", "(", "raw_event", ".", "wd", ")", "created_dir", "=", "os", ".", "p...
If the event affects a directory and the auto_add flag of the targeted watch is set to True, a new watch is added on this new directory, with the same attribute values as those of this watch.
[ "If", "the", "event", "affects", "a", "directory", "and", "the", "auto_add", "flag", "of", "the", "targeted", "watch", "is", "set", "to", "True", "a", "new", "watch", "is", "added", "on", "this", "new", "directory", "with", "the", "same", "attribute", "...
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python2/pyinotify.py#L694-L740
224,678
seb-m/pyinotify
python2/pyinotify.py
Notifier.check_events
def check_events(self, timeout=None): """ Check for new events available to read, blocks up to timeout milliseconds. @param timeout: If specified it overrides the corresponding instance attribute _timeout. timeout must be specified in milliseconds. @type timeout: int @return: New events to read. @rtype: bool """ while True: try: # blocks up to 'timeout' milliseconds if timeout is None: timeout = self._timeout ret = self._pollobj.poll(timeout) except select.error, err: if err[0] == errno.EINTR: continue # interrupted, retry else: raise else: break if not ret or (self._pipe[0] == ret[0][0]): return False # only one fd is polled return ret[0][1] & select.POLLIN
python
def check_events(self, timeout=None): while True: try: # blocks up to 'timeout' milliseconds if timeout is None: timeout = self._timeout ret = self._pollobj.poll(timeout) except select.error, err: if err[0] == errno.EINTR: continue # interrupted, retry else: raise else: break if not ret or (self._pipe[0] == ret[0][0]): return False # only one fd is polled return ret[0][1] & select.POLLIN
[ "def", "check_events", "(", "self", ",", "timeout", "=", "None", ")", ":", "while", "True", ":", "try", ":", "# blocks up to 'timeout' milliseconds", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "_timeout", "ret", "=", "self", ".", "_po...
Check for new events available to read, blocks up to timeout milliseconds. @param timeout: If specified it overrides the corresponding instance attribute _timeout. timeout must be specified in milliseconds. @type timeout: int @return: New events to read. @rtype: bool
[ "Check", "for", "new", "events", "available", "to", "read", "blocks", "up", "to", "timeout", "milliseconds", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python2/pyinotify.py#L1193-L1223
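The retry loop exists because a signal can interrupt poll() with EINTR. Since PEP 475 (Python 3.5) the interpreter retries interrupted syscalls itself, so a modern sketch of the same check collapses to a single call (parameter names here are hypothetical):

```python
import select

def check_events(pollobj, pipe_read_fd, timeout_ms):
    ret = pollobj.poll(timeout_ms)       # EINTR retried automatically
    if not ret or ret[0][0] == pipe_read_fd:
        return False                     # woken by the stop pipe, not inotify
    return bool(ret[0][1] & select.POLLIN)
```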
224,679
seb-m/pyinotify
python2/pyinotify.py
Notifier.read_events
def read_events(self): """ Read events from device, build _RawEvents, and enqueue them. """ buf_ = array.array('i', [0]) # get event queue size if fcntl.ioctl(self._fd, termios.FIONREAD, buf_, 1) == -1: return queue_size = buf_[0] if queue_size < self._threshold: log.debug('(fd: %d) %d bytes available to read but threshold is ' 'fixed to %d bytes', self._fd, queue_size, self._threshold) return try: # Read content from file r = os.read(self._fd, queue_size) except Exception, msg: raise NotifierError(msg) log.debug('Event queue size: %d', queue_size) rsum = 0 # counter while rsum < queue_size: s_size = 16 # Retrieve wd, mask, cookie and fname_len wd, mask, cookie, fname_len = struct.unpack('iIII', r[rsum:rsum+s_size]) # Retrieve name fname, = struct.unpack('%ds' % fname_len, r[rsum + s_size:rsum + s_size + fname_len]) rawevent = _RawEvent(wd, mask, cookie, fname) if self._coalesce: # Only enqueue new (unique) events. raweventstr = str(rawevent) if raweventstr not in self._eventset: self._eventset.add(raweventstr) self._eventq.append(rawevent) else: self._eventq.append(rawevent) rsum += s_size + fname_len
python
def read_events(self): buf_ = array.array('i', [0]) # get event queue size if fcntl.ioctl(self._fd, termios.FIONREAD, buf_, 1) == -1: return queue_size = buf_[0] if queue_size < self._threshold: log.debug('(fd: %d) %d bytes available to read but threshold is ' 'fixed to %d bytes', self._fd, queue_size, self._threshold) return try: # Read content from file r = os.read(self._fd, queue_size) except Exception, msg: raise NotifierError(msg) log.debug('Event queue size: %d', queue_size) rsum = 0 # counter while rsum < queue_size: s_size = 16 # Retrieve wd, mask, cookie and fname_len wd, mask, cookie, fname_len = struct.unpack('iIII', r[rsum:rsum+s_size]) # Retrieve name fname, = struct.unpack('%ds' % fname_len, r[rsum + s_size:rsum + s_size + fname_len]) rawevent = _RawEvent(wd, mask, cookie, fname) if self._coalesce: # Only enqueue new (unique) events. raweventstr = str(rawevent) if raweventstr not in self._eventset: self._eventset.add(raweventstr) self._eventq.append(rawevent) else: self._eventq.append(rawevent) rsum += s_size + fname_len
[ "def", "read_events", "(", "self", ")", ":", "buf_", "=", "array", ".", "array", "(", "'i'", ",", "[", "0", "]", ")", "# get event queue size", "if", "fcntl", ".", "ioctl", "(", "self", ".", "_fd", ",", "termios", ".", "FIONREAD", ",", "buf_", ",", ...
Read events from device, build _RawEvents, and enqueue them.
[ "Read", "events", "from", "device", "build", "_RawEvents", "and", "enqueue", "them", "." ]
0f3f8950d12e4a6534320153eed1a90a778da4ae
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python2/pyinotify.py#L1225-L1264
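Each inotify record decoded by read_events is a fixed 16-byte header (wd, mask, cookie, name length) followed by a NUL-padded name. A self-contained round-trip sketch of that layout:

```python
import struct

name = b'hello.txt\x00\x00\x00'            # kernel pads names with NULs
raw = struct.pack('iIII', 1, 0x100, 0, len(name)) + name  # 0x100 = IN_CREATE

wd, mask, cookie, fname_len = struct.unpack('iIII', raw[:16])
fname, = struct.unpack('%ds' % fname_len, raw[16:16 + fname_len])
print(wd, hex(mask), fname.rstrip(b'\x00'))  # 1 0x100 b'hello.txt'
```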
224,680
sibson/redbeat
redbeat/schedulers.py
ensure_conf
def ensure_conf(app): """ Ensure that for the given app the redbeat_conf attribute is set to an instance of the RedBeatConfig class. """ name = 'redbeat_conf' app = app_or_default(app) try: config = getattr(app, name) except AttributeError: config = RedBeatConfig(app) setattr(app, name, config) return config
python
def ensure_conf(app): name = 'redbeat_conf' app = app_or_default(app) try: config = getattr(app, name) except AttributeError: config = RedBeatConfig(app) setattr(app, name, config) return config
[ "def", "ensure_conf", "(", "app", ")", ":", "name", "=", "'redbeat_conf'", "app", "=", "app_or_default", "(", "app", ")", "try", ":", "config", "=", "getattr", "(", "app", ",", "name", ")", "except", "AttributeError", ":", "config", "=", "RedBeatConfig", ...
Ensure that for the given app the redbeat_conf attribute is set to an instance of the RedBeatConfig class.
[ "Ensure", "that", "for", "the", "given", "app", "the", "redbeat_conf", "attribute", "is", "set", "to", "an", "instance", "of", "the", "RedBeatConfig", "class", "." ]
034bbb6c583333a527e835bd6b660c9177e36e9b
https://github.com/sibson/redbeat/blob/034bbb6c583333a527e835bd6b660c9177e36e9b/redbeat/schedulers.py#L90-L103
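The function is a get-or-create attribute cache: the config object is built once per app and reused on every later call. The idiom in isolation, with hypothetical stand-ins for the Celery app and RedBeatConfig:

```python
class AppConfig:
    pass

class App:
    pass

def ensure_conf(app, name='my_conf'):
    try:
        return getattr(app, name)      # fast path: already attached
    except AttributeError:
        config = AppConfig()           # slow path: build and attach once
        setattr(app, name, config)
        return config

app = App()
assert ensure_conf(app) is ensure_conf(app)   # created once, then reused
```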
224,681
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
http2time
def http2time(text): """Returns time in seconds since epoch of time represented by a string. Return value is an integer. None is returned if the format of str is unrecognized, the time is outside the representable range, or the timezone string is not recognized. If the string contains no timezone, UTC is assumed. The timezone in the string may be numerical (like "-0800" or "+0100") or a string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the timezone strings equivalent to UTC (zero offset) are known to the function. The function loosely parses the following formats: Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format 09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday) 08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday) 08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday) The parser ignores leading and trailing whitespace. The time may be absent. If the year is given with only 2 digits, the function will select the century that makes the year closest to the current date. """ # fast exit for strictly conforming string m = STRICT_DATE_RE.search(text) if m: g = m.groups() mon = MONTHS_LOWER.index(g[1].lower()) + 1 tt = (int(g[2]), mon, int(g[0]), int(g[3]), int(g[4]), float(g[5])) return _timegm(tt) # No, we need some messy parsing... # clean up text = text.lstrip() text = WEEKDAY_RE.sub("", text, 1) # Useless weekday # tz is time zone specifier string day, mon, yr, hr, min, sec, tz = [None]*7 # loose regexp parse m = LOOSE_HTTP_DATE_RE.search(text) if m is not None: day, mon, yr, hr, min, sec, tz = m.groups() else: return None # bad format return _str2time(day, mon, yr, hr, min, sec, tz)
python
def http2time(text): # fast exit for strictly conforming string m = STRICT_DATE_RE.search(text) if m: g = m.groups() mon = MONTHS_LOWER.index(g[1].lower()) + 1 tt = (int(g[2]), mon, int(g[0]), int(g[3]), int(g[4]), float(g[5])) return _timegm(tt) # No, we need some messy parsing... # clean up text = text.lstrip() text = WEEKDAY_RE.sub("", text, 1) # Useless weekday # tz is time zone specifier string day, mon, yr, hr, min, sec, tz = [None]*7 # loose regexp parse m = LOOSE_HTTP_DATE_RE.search(text) if m is not None: day, mon, yr, hr, min, sec, tz = m.groups() else: return None # bad format return _str2time(day, mon, yr, hr, min, sec, tz)
[ "def", "http2time", "(", "text", ")", ":", "# fast exit for strictly conforming string", "m", "=", "STRICT_DATE_RE", ".", "search", "(", "text", ")", "if", "m", ":", "g", "=", "m", ".", "groups", "(", ")", "mon", "=", "MONTHS_LOWER", ".", "index", "(", "...
Returns time in seconds since epoch of time represented by a string. Return value is an integer. None is returned if the format of str is unrecognized, the time is outside the representable range, or the timezone string is not recognized. If the string contains no timezone, UTC is assumed. The timezone in the string may be numerical (like "-0800" or "+0100") or a string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the timezone strings equivalent to UTC (zero offset) are known to the function. The function loosely parses the following formats: Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format 09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday) 08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday) 08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday) The parser ignores leading and trailing whitespace. The time may be absent. If the year is given with only 2 digits, the function will select the century that makes the year closest to the current date.
[ "Returns", "time", "in", "seconds", "since", "epoch", "of", "time", "represented", "by", "a", "string", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L232-L286
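For the strictly conforming RFC 1123 case, the standard library performs the same string-to-epoch conversion; a sketch, not a replacement for the loose fallback parsing above:

```python
from email.utils import parsedate_to_datetime

dt = parsedate_to_datetime("Wed, 09 Feb 1994 22:23:32 GMT")
print(int(dt.timestamp()))   # 760832612 seconds since the epoch
```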
224,682
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
unmatched
def unmatched(match): """Return unmatched part of re.Match object.""" start, end = match.span(0) return match.string[:start]+match.string[end:]
python
def unmatched(match): start, end = match.span(0) return match.string[:start]+match.string[end:]
[ "def", "unmatched", "(", "match", ")", ":", "start", ",", "end", "=", "match", ".", "span", "(", "0", ")", "return", "match", ".", "string", "[", ":", "start", "]", "+", "match", ".", "string", "[", "end", ":", "]" ]
Return unmatched part of re.Match object.
[ "Return", "unmatched", "part", "of", "re", ".", "Match", "object", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L337-L340
224,683
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
split_header_words
def split_header_words(header_values): r"""Parse header values into a list of lists containing key,value pairs. The function knows how to deal with ",", ";" and "=" as well as quoted values after "=". A list of space separated tokens are parsed as if they were separated by ";". If the header_values passed as argument contains multiple values, then they are treated as if they were a single value separated by comma ",". This means that this function is useful for parsing header fields that follow this syntax (BNF as from the HTTP/1.1 specification, but we relax the requirement for tokens). headers = #header header = (token | parameter) *( [";"] (token | parameter)) token = 1*<any CHAR except CTLs or separators> separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT quoted-string = ( <"> *(qdtext | quoted-pair ) <"> ) qdtext = <any TEXT except <">> quoted-pair = "\" CHAR parameter = attribute "=" value attribute = token value = token | quoted-string Each header is represented by a list of key/value pairs. The value for a simple token (not part of a parameter) is None. Syntactically incorrect headers will not necessarily be parsed as you would want. This is easier to describe with some examples: >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz']) [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]] >>> split_header_words(['text/html; charset="iso-8859-1"']) [[('text/html', None), ('charset', 'iso-8859-1')]] >>> split_header_words([r'Basic realm="\"foo\bar\""']) [[('Basic', None), ('realm', '"foobar"')]] """ assert not isinstance(header_values, str) result = [] for text in header_values: orig_text = text pairs = [] while text: m = HEADER_TOKEN_RE.search(text) if m: text = unmatched(m) name = m.group(1) m = HEADER_QUOTED_VALUE_RE.search(text) if m: # quoted value text = unmatched(m) value = m.group(1) value = HEADER_ESCAPE_RE.sub(r"\1", value) else: m = HEADER_VALUE_RE.search(text) if m: # unquoted value text = unmatched(m) value = m.group(1) value = value.rstrip() else: # no value, a lone token value = None pairs.append((name, value)) elif text.lstrip().startswith(","): # concatenated headers, as per RFC 2616 section 4.2 text = text.lstrip()[1:] if pairs: result.append(pairs) pairs = [] else: # skip junk non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text) assert nr_junk_chars > 0, ( "split_header_words bug: '%s', '%s', %s" % (orig_text, text, pairs)) text = non_junk if pairs: result.append(pairs) return result
python
def split_header_words(header_values): assert not isinstance(header_values, str) result = [] for text in header_values: orig_text = text pairs = [] while text: m = HEADER_TOKEN_RE.search(text) if m: text = unmatched(m) name = m.group(1) m = HEADER_QUOTED_VALUE_RE.search(text) if m: # quoted value text = unmatched(m) value = m.group(1) value = HEADER_ESCAPE_RE.sub(r"\1", value) else: m = HEADER_VALUE_RE.search(text) if m: # unquoted value text = unmatched(m) value = m.group(1) value = value.rstrip() else: # no value, a lone token value = None pairs.append((name, value)) elif text.lstrip().startswith(","): # concatenated headers, as per RFC 2616 section 4.2 text = text.lstrip()[1:] if pairs: result.append(pairs) pairs = [] else: # skip junk non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text) assert nr_junk_chars > 0, ( "split_header_words bug: '%s', '%s', %s" % (orig_text, text, pairs)) text = non_junk if pairs: result.append(pairs) return result
[ "def", "split_header_words", "(", "header_values", ")", ":", "assert", "not", "isinstance", "(", "header_values", ",", "str", ")", "result", "=", "[", "]", "for", "text", "in", "header_values", ":", "orig_text", "=", "text", "pairs", "=", "[", "]", "while"...
r"""Parse header values into a list of lists containing key,value pairs. The function knows how to deal with ",", ";" and "=" as well as quoted values after "=". A list of space separated tokens are parsed as if they were separated by ";". If the header_values passed as argument contains multiple values, then they are treated as if they were a single value separated by comma ",". This means that this function is useful for parsing header fields that follow this syntax (BNF as from the HTTP/1.1 specification, but we relax the requirement for tokens). headers = #header header = (token | parameter) *( [";"] (token | parameter)) token = 1*<any CHAR except CTLs or separators> separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT quoted-string = ( <"> *(qdtext | quoted-pair ) <"> ) qdtext = <any TEXT except <">> quoted-pair = "\" CHAR parameter = attribute "=" value attribute = token value = token | quoted-string Each header is represented by a list of key/value pairs. The value for a simple token (not part of a parameter) is None. Syntactically incorrect headers will not necessarily be parsed as you would want. This is easier to describe with some examples: >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz']) [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]] >>> split_header_words(['text/html; charset="iso-8859-1"']) [[('text/html', None), ('charset', 'iso-8859-1')]] >>> split_header_words([r'Basic realm="\"foo\bar\""']) [[('Basic', None), ('realm', '"foobar"')]]
[ "r", "Parse", "header", "values", "into", "a", "list", "of", "lists", "containing", "key", "value", "pairs", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L346-L429
224,684
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
parse_ns_headers
def parse_ns_headers(ns_headers): """Ad-hoc parser for Netscape protocol cookie-attributes. The old Netscape cookie format for Set-Cookie can for instance contain an unquoted "," in the expires field, so we have to use this ad-hoc parser instead of split_header_words. XXX This may not make the best possible effort to parse all the crap that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient parser is probably better, so could do worse than following that if this ever gives any trouble. Currently, this is also used for parsing RFC 2109 cookies. """ known_attrs = ("expires", "domain", "path", "secure", # RFC 2109 attrs (may turn up in Netscape cookies, too) "version", "port", "max-age") result = [] for ns_header in ns_headers: pairs = [] version_set = False for ii, param in enumerate(re.split(r";\s*", ns_header)): param = param.rstrip() if param == "": continue if "=" not in param: k, v = param, None else: k, v = re.split(r"\s*=\s*", param, 1) k = k.lstrip() if ii != 0: lc = k.lower() if lc in known_attrs: k = lc if k == "version": # This is an RFC 2109 cookie. v = strip_quotes(v) version_set = True if k == "expires": # convert expires date to seconds since epoch v = http2time(strip_quotes(v)) # None if invalid pairs.append((k, v)) if pairs: if not version_set: pairs.append(("version", "0")) result.append(pairs) return result
python
def parse_ns_headers(ns_headers): known_attrs = ("expires", "domain", "path", "secure", # RFC 2109 attrs (may turn up in Netscape cookies, too) "version", "port", "max-age") result = [] for ns_header in ns_headers: pairs = [] version_set = False for ii, param in enumerate(re.split(r";\s*", ns_header)): param = param.rstrip() if param == "": continue if "=" not in param: k, v = param, None else: k, v = re.split(r"\s*=\s*", param, 1) k = k.lstrip() if ii != 0: lc = k.lower() if lc in known_attrs: k = lc if k == "version": # This is an RFC 2109 cookie. v = strip_quotes(v) version_set = True if k == "expires": # convert expires date to seconds since epoch v = http2time(strip_quotes(v)) # None if invalid pairs.append((k, v)) if pairs: if not version_set: pairs.append(("version", "0")) result.append(pairs) return result
[ "def", "parse_ns_headers", "(", "ns_headers", ")", ":", "known_attrs", "=", "(", "\"expires\"", ",", "\"domain\"", ",", "\"path\"", ",", "\"secure\"", ",", "# RFC 2109 attrs (may turn up in Netscape cookies, too)", "\"version\"", ",", "\"port\"", ",", "\"max-age\"", ")"...
Ad-hoc parser for Netscape protocol cookie-attributes. The old Netscape cookie format for Set-Cookie can for instance contain an unquoted "," in the expires field, so we have to use this ad-hoc parser instead of split_header_words. XXX This may not make the best possible effort to parse all the crap that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient parser is probably better, so could do worse than following that if this ever gives any trouble. Currently, this is also used for parsing RFC 2109 cookies.
[ "Ad", "-", "hoc", "parser", "for", "Netscape", "protocol", "cookie", "-", "attributes", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L464-L513
224,685
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
is_HDN
def is_HDN(text): """Return True if text is a host domain name.""" # XXX # This may well be wrong. Which RFC is HDN defined in, if any (for # the purposes of RFC 2965)? # For the current implementation, what about IPv6? Remember to look # at other uses of IPV4_RE also, if change this. if IPV4_RE.search(text): return False if text == "": return False if text[0] == "." or text[-1] == ".": return False return True
python
def is_HDN(text): # XXX # This may well be wrong. Which RFC is HDN defined in, if any (for # the purposes of RFC 2965)? # For the current implementation, what about IPv6? Remember to look # at other uses of IPV4_RE also, if change this. if IPV4_RE.search(text): return False if text == "": return False if text[0] == "." or text[-1] == ".": return False return True
[ "def", "is_HDN", "(", "text", ")", ":", "# XXX", "# This may well be wrong. Which RFC is HDN defined in, if any (for", "# the purposes of RFC 2965)?", "# For the current implementation, what about IPv6? Remember to look", "# at other uses of IPV4_RE also, if change this.", "if", "IPV4_RE...
Return True if text is a host domain name.
[ "Return", "True", "if", "text", "is", "a", "host", "domain", "name", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L517-L530
224,686
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
domain_match
def domain_match(A, B): """Return True if domain A domain-matches domain B, according to RFC 2965. A and B may be host domain names or IP addresses. RFC 2965, section 1: Host names can be specified either as an IP address or a HDN string. Sometimes we compare one host name with another. (Such comparisons SHALL be case-insensitive.) Host A's name domain-matches host B's if * their host name strings string-compare equal; or * A is a HDN string and has the form NB, where N is a non-empty name string, B has the form .B', and B' is a HDN string. (So, x.y.com domain-matches .Y.com but not Y.com.) Note that domain-match is not a commutative operation: a.b.c.com domain-matches .c.com, but not the reverse. """ # Note that, if A or B are IP addresses, the only relevant part of the # definition of the domain-match algorithm is the direct string-compare. A = A.lower() B = B.lower() if A == B: return True if not is_HDN(A): return False i = A.rfind(B) if i == -1 or i == 0: # A does not have form NB, or N is the empty string return False if not B.startswith("."): return False if not is_HDN(B[1:]): return False return True
python
def domain_match(A, B): # Note that, if A or B are IP addresses, the only relevant part of the # definition of the domain-match algorithm is the direct string-compare. A = A.lower() B = B.lower() if A == B: return True if not is_HDN(A): return False i = A.rfind(B) if i == -1 or i == 0: # A does not have form NB, or N is the empty string return False if not B.startswith("."): return False if not is_HDN(B[1:]): return False return True
[ "def", "domain_match", "(", "A", ",", "B", ")", ":", "# Note that, if A or B are IP addresses, the only relevant part of the", "# definition of the domain-match algorithm is the direct string-compare.", "A", "=", "A", ".", "lower", "(", ")", "B", "=", "B", ".", "lower", "...
Return True if domain A domain-matches domain B, according to RFC 2965. A and B may be host domain names or IP addresses. RFC 2965, section 1: Host names can be specified either as an IP address or a HDN string. Sometimes we compare one host name with another. (Such comparisons SHALL be case-insensitive.) Host A's name domain-matches host B's if * their host name strings string-compare equal; or * A is a HDN string and has the form NB, where N is a non-empty name string, B has the form .B', and B' is a HDN string. (So, x.y.com domain-matches .Y.com but not Y.com.) Note that domain-match is not a commutative operation: a.b.c.com domain-matches .c.com, but not the reverse.
[ "Return", "True", "if", "domain", "A", "domain", "-", "matches", "domain", "B", "according", "to", "RFC", "2965", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L532-L569
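A few checks that mirror the RFC 2965 examples in the docstring (a sketch that assumes domain_match from this module is importable):

```python
assert domain_match("x.y.com", ".y.com")          # A has the form NB
assert not domain_match("y.com", ".y.com")        # N would be empty
assert domain_match("a.b.c.com", ".c.com")        # matches with the dot
assert not domain_match(".c.com", "a.b.c.com")    # not commutative
assert domain_match("192.168.0.1", "192.168.0.1") # IPs: exact match only
```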
224,687
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
request_path
def request_path(request): """Path component of request-URI, as defined by RFC 2965.""" url = request.get_full_url() parts = urlsplit(url) path = escape_path(parts.path) if not path.startswith("/"): # fix bad RFC 2396 absoluteURI path = "/" + path return path
python
def request_path(request): url = request.get_full_url() parts = urlsplit(url) path = escape_path(parts.path) if not path.startswith("/"): # fix bad RFC 2396 absoluteURI path = "/" + path return path
[ "def", "request_path", "(", "request", ")", ":", "url", "=", "request", ".", "get_full_url", "(", ")", "parts", "=", "urlsplit", "(", "url", ")", "path", "=", "escape_path", "(", "parts", ".", "path", ")", "if", "not", "path", ".", "startswith", "(", ...
Path component of request-URI, as defined by RFC 2965.
[ "Path", "component", "of", "request", "-", "URI", "as", "defined", "by", "RFC", "2965", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L629-L637
224,688
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
escape_path
def escape_path(path): """Escape any invalid characters in HTTP URL, and uppercase all escapes.""" # There's no knowing what character encoding was used to create URLs # containing %-escapes, but since we have to pick one to escape invalid # path characters, we pick UTF-8, as recommended in the HTML 4.0 # specification: # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 # And here, kind of: draft-fielding-uri-rfc2396bis-03 # (And in draft IRI specification: draft-duerst-iri-05) # (And here, for new URI schemes: RFC 2718) path = quote(path, HTTP_PATH_SAFE) path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path) return path
python
def escape_path(path): # There's no knowing what character encoding was used to create URLs # containing %-escapes, but since we have to pick one to escape invalid # path characters, we pick UTF-8, as recommended in the HTML 4.0 # specification: # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 # And here, kind of: draft-fielding-uri-rfc2396bis-03 # (And in draft IRI specification: draft-duerst-iri-05) # (And here, for new URI schemes: RFC 2718) path = quote(path, HTTP_PATH_SAFE) path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path) return path
[ "def", "escape_path", "(", "path", ")", ":", "# There's no knowing what character encoding was used to create URLs", "# containing %-escapes, but since we have to pick one to escape invalid", "# path characters, we pick UTF-8, as recommended in the HTML 4.0", "# specification:", "# http://www.w3...
Escape any invalid characters in HTTP URL, and uppercase all escapes.
[ "Escape", "any", "invalid", "characters", "in", "HTTP", "URL", "and", "uppercase", "all", "escapes", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L659-L671
224,689
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
reach
def reach(h): """Return reach of host h, as defined by RFC 2965, section 1. The reach R of a host name H is defined as follows: * If - H is the host domain name of a host; and, - H has the form A.B; and - A has no embedded (that is, interior) dots; and - B has at least one embedded dot, or B is the string "local". then the reach of H is .B. * Otherwise, the reach of H is H. >>> reach("www.acme.com") '.acme.com' >>> reach("acme.com") 'acme.com' >>> reach("acme.local") '.local' """ i = h.find(".") if i >= 0: #a = h[:i] # this line is only here to show what a is b = h[i+1:] i = b.find(".") if is_HDN(h) and (i >= 0 or b == "local"): return "."+b return h
python
def reach(h): i = h.find(".") if i >= 0: #a = h[:i] # this line is only here to show what a is b = h[i+1:] i = b.find(".") if is_HDN(h) and (i >= 0 or b == "local"): return "."+b return h
[ "def", "reach", "(", "h", ")", ":", "i", "=", "h", ".", "find", "(", "\".\"", ")", "if", "i", ">=", "0", ":", "#a = h[:i] # this line is only here to show what a is", "b", "=", "h", "[", "i", "+", "1", ":", "]", "i", "=", "b", ".", "find", "(", ...
Return reach of host h, as defined by RFC 2965, section 1. The reach R of a host name H is defined as follows: * If - H is the host domain name of a host; and, - H has the form A.B; and - A has no embedded (that is, interior) dots; and - B has at least one embedded dot, or B is the string "local". then the reach of H is .B. * Otherwise, the reach of H is H. >>> reach("www.acme.com") '.acme.com' >>> reach("acme.com") 'acme.com' >>> reach("acme.local") '.local'
[ "Return", "reach", "of", "host", "h", "as", "defined", "by", "RFC", "2965", "section", "1", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L673-L706
224,690
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
deepvalues
def deepvalues(mapping): """Iterates over nested mapping, depth-first, in sorted order by key.""" values = vals_sorted_by_key(mapping) for obj in values: mapping = False try: obj.items except AttributeError: pass else: mapping = True for subobj in deepvalues(obj): yield subobj if not mapping: yield obj
python
def deepvalues(mapping):
    values = vals_sorted_by_key(mapping)
    for obj in values:
        mapping = False
        try:
            obj.items
        except AttributeError:
            pass
        else:
            mapping = True
            for subobj in deepvalues(obj):
                yield subobj
        if not mapping:
            yield obj
[ "def", "deepvalues", "(", "mapping", ")", ":", "values", "=", "vals_sorted_by_key", "(", "mapping", ")", "for", "obj", "in", "values", ":", "mapping", "=", "False", "try", ":", "obj", ".", "items", "except", "AttributeError", ":", "pass", "else", ":", "m...
Iterates over nested mapping, depth-first, in sorted order by key.
[ "Iterates", "over", "nested", "mapping", "depth", "-", "first", "in", "sorted", "order", "by", "key", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1203-L1217
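A minimal sketch of the depth-first, key-sorted traversal; the nested dict is hypothetical. Note that deepvalues yields only leaf values, never the nested mappings themselves.

from http.cookiejar import deepvalues

nested = {"b": {"y": 2, "x": 1}, "a": 0}
print(list(deepvalues(nested)))
# expected: [0, 1, 2]  -- key order "a", "b", then "x", "y" within the sub-mapping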
224,691
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
lwp_cookie_str
def lwp_cookie_str(cookie): """Return string representation of Cookie in an the LWP cookie file format. Actually, the format is extended a bit -- see module docstring. """ h = [(cookie.name, cookie.value), ("path", cookie.path), ("domain", cookie.domain)] if cookie.port is not None: h.append(("port", cookie.port)) if cookie.path_specified: h.append(("path_spec", None)) if cookie.port_specified: h.append(("port_spec", None)) if cookie.domain_initial_dot: h.append(("domain_dot", None)) if cookie.secure: h.append(("secure", None)) if cookie.expires: h.append(("expires", time2isoz(float(cookie.expires)))) if cookie.discard: h.append(("discard", None)) if cookie.comment: h.append(("comment", cookie.comment)) if cookie.comment_url: h.append(("commenturl", cookie.comment_url)) keys = sorted(cookie._rest.keys()) for k in keys: h.append((k, str(cookie._rest[k]))) h.append(("version", str(cookie.version))) return join_header_words([h])
python
def lwp_cookie_str(cookie):
    h = [(cookie.name, cookie.value),
         ("path", cookie.path),
         ("domain", cookie.domain)]
    if cookie.port is not None: h.append(("port", cookie.port))
    if cookie.path_specified: h.append(("path_spec", None))
    if cookie.port_specified: h.append(("port_spec", None))
    if cookie.domain_initial_dot: h.append(("domain_dot", None))
    if cookie.secure: h.append(("secure", None))
    if cookie.expires: h.append(("expires", time2isoz(float(cookie.expires))))
    if cookie.discard: h.append(("discard", None))
    if cookie.comment: h.append(("comment", cookie.comment))
    if cookie.comment_url: h.append(("commenturl", cookie.comment_url))

    keys = sorted(cookie._rest.keys())
    for k in keys:
        h.append((k, str(cookie._rest[k])))

    h.append(("version", str(cookie.version)))

    return join_header_words([h])
[ "def", "lwp_cookie_str", "(", "cookie", ")", ":", "h", "=", "[", "(", "cookie", ".", "name", ",", "cookie", ".", "value", ")", ",", "(", "\"path\"", ",", "cookie", ".", "path", ")", ",", "(", "\"domain\"", ",", "cookie", ".", "domain", ")", "]", ...
Return string representation of Cookie in the LWP cookie file format.

Actually, the format is extended a bit -- see module docstring.
[ "Return", "string", "representation", "of", "Cookie", "in", "an", "the", "LWP", "cookie", "file", "format", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1817-L1843
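A hedged sketch: constructing a Cookie directly takes the full positional argument list, so all field values below are made up, and the exact output depends on join_header_words' quoting rules.

from http.cookiejar import Cookie, lwp_cookie_str

c = Cookie(0, "session", "abc123", None, False,
           "example.com", True, False,
           "/", True, False, None, True, None, None, {})
print(lwp_cookie_str(c))
# roughly: session=abc123; path="/"; domain="example.com"; path_spec; discard; version=0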
224,692
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
DefaultCookiePolicy.set_allowed_domains
def set_allowed_domains(self, allowed_domains):
    """Set the sequence of allowed domains, or None."""
    if allowed_domains is not None:
        allowed_domains = tuple(allowed_domains)
    self._allowed_domains = allowed_domains
python
def set_allowed_domains(self, allowed_domains):
    if allowed_domains is not None:
        allowed_domains = tuple(allowed_domains)
    self._allowed_domains = allowed_domains
[ "def", "set_allowed_domains", "(", "self", ",", "allowed_domains", ")", ":", "if", "allowed_domains", "is", "not", "None", ":", "allowed_domains", "=", "tuple", "(", "allowed_domains", ")", "self", ".", "_allowed_domains", "=", "allowed_domains" ]
Set the sequence of allowed domains, or None.
[ "Set", "the", "sequence", "of", "allowed", "domains", "or", "None", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L921-L925
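A minimal sketch with hypothetical domain names. Passing a list is fine, since the method copies it into a tuple.

from http.cookiejar import CookieJar, DefaultCookiePolicy

policy = DefaultCookiePolicy()
policy.set_allowed_domains(["example.com", ".example.org"])  # stored as a tuple
jar = CookieJar(policy)
# The policy now rejects cookies from any other domain;
# set_allowed_domains(None) lifts the restriction again.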
224,693
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
CookieJar._cookies_for_request
def _cookies_for_request(self, request):
    """Return a list of cookies to be returned to server."""
    cookies = []
    for domain in self._cookies.keys():
        cookies.extend(self._cookies_for_domain(domain, request))
    return cookies
python
def _cookies_for_request(self, request):
    cookies = []
    for domain in self._cookies.keys():
        cookies.extend(self._cookies_for_domain(domain, request))
    return cookies
[ "def", "_cookies_for_request", "(", "self", ",", "request", ")", ":", "cookies", "=", "[", "]", "for", "domain", "in", "self", ".", "_cookies", ".", "keys", "(", ")", ":", "cookies", ".", "extend", "(", "self", ".", "_cookies_for_domain", "(", "domain", ...
Return a list of cookies to be returned to server.
[ "Return", "a", "list", "of", "cookies", "to", "be", "returned", "to", "server", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1268-L1273
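This is internal plumbing; the public entry point that ends up calling it is CookieJar.add_cookie_header(). A hedged sketch with a hypothetical URL:

from urllib.request import Request
from http.cookiejar import CookieJar

jar = CookieJar()
req = Request("http://example.com/")
jar.add_cookie_header(req)       # internally collects via _cookies_for_request(req)
print(req.get_header("Cookie"))  # None -- the jar is empty here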
224,694
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
CookieJar._cookie_attrs
def _cookie_attrs(self, cookies):
    """Return a list of cookie-attributes to be returned to server.

    like ['foo="bar"; $Path="/"', ...]

    The $Version attribute is also added when appropriate (currently only
    once per request).

    """
    # add cookies in order of most specific (ie. longest) path first
    cookies.sort(key=lambda a: len(a.path), reverse=True)

    version_set = False

    attrs = []
    for cookie in cookies:
        # set version of Cookie header
        # XXX
        # What should it be if multiple matching Set-Cookie headers have
        #  different versions themselves?
        # Answer: there is no answer; was supposed to be settled by
        #  RFC 2965 errata, but that may never appear...
        version = cookie.version
        if not version_set:
            version_set = True
            if version > 0:
                attrs.append("$Version=%s" % version)

        # quote cookie value if necessary
        # (not for Netscape protocol, which already has any quotes
        #  intact, due to the poorly-specified Netscape Cookie: syntax)
        if ((cookie.value is not None) and
            self.non_word_re.search(cookie.value) and version > 0):
            value = self.quote_re.sub(r"\\\1", cookie.value)
        else:
            value = cookie.value

        # add cookie-attributes to be returned in Cookie header
        if cookie.value is None:
            attrs.append(cookie.name)
        else:
            attrs.append("%s=%s" % (cookie.name, value))
        if version > 0:
            if cookie.path_specified:
                attrs.append('$Path="%s"' % cookie.path)
            if cookie.domain.startswith("."):
                domain = cookie.domain
                if (not cookie.domain_initial_dot and
                    domain.startswith(".")):
                    domain = domain[1:]
                attrs.append('$Domain="%s"' % domain)
            if cookie.port is not None:
                p = "$Port"
                if cookie.port_specified:
                    p = p + ('="%s"' % cookie.port)
                attrs.append(p)

    return attrs
python
def _cookie_attrs(self, cookies):
    # add cookies in order of most specific (ie. longest) path first
    cookies.sort(key=lambda a: len(a.path), reverse=True)

    version_set = False

    attrs = []
    for cookie in cookies:
        # set version of Cookie header
        # XXX
        # What should it be if multiple matching Set-Cookie headers have
        #  different versions themselves?
        # Answer: there is no answer; was supposed to be settled by
        #  RFC 2965 errata, but that may never appear...
        version = cookie.version
        if not version_set:
            version_set = True
            if version > 0:
                attrs.append("$Version=%s" % version)

        # quote cookie value if necessary
        # (not for Netscape protocol, which already has any quotes
        #  intact, due to the poorly-specified Netscape Cookie: syntax)
        if ((cookie.value is not None) and
            self.non_word_re.search(cookie.value) and version > 0):
            value = self.quote_re.sub(r"\\\1", cookie.value)
        else:
            value = cookie.value

        # add cookie-attributes to be returned in Cookie header
        if cookie.value is None:
            attrs.append(cookie.name)
        else:
            attrs.append("%s=%s" % (cookie.name, value))
        if version > 0:
            if cookie.path_specified:
                attrs.append('$Path="%s"' % cookie.path)
            if cookie.domain.startswith("."):
                domain = cookie.domain
                if (not cookie.domain_initial_dot and
                    domain.startswith(".")):
                    domain = domain[1:]
                attrs.append('$Domain="%s"' % domain)
            if cookie.port is not None:
                p = "$Port"
                if cookie.port_specified:
                    p = p + ('="%s"' % cookie.port)
                attrs.append(p)

    return attrs
[ "def", "_cookie_attrs", "(", "self", ",", "cookies", ")", ":", "# add cookies in order of most specific (ie. longest) path first", "cookies", ".", "sort", "(", "key", "=", "lambda", "a", ":", "len", "(", "a", ".", "path", ")", ",", "reverse", "=", "True", ")",...
Return a list of cookie-attributes to be returned to server.

like ['foo="bar"; $Path="/"', ...]

The $Version attribute is also added when appropriate (currently only
once per request).
[ "Return", "a", "list", "of", "cookie", "-", "attributes", "to", "be", "returned", "to", "server", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1275-L1332
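A hedged sketch of the path-length ordering; the two cookies are hypothetical version-0 cookies (so no $Version/$Path attributes appear), and calling the private method directly is purely illustrative.

from http.cookiejar import Cookie, CookieJar

def v0(name, value, path):
    # hypothetical helper: a bare Netscape-style (version 0) cookie
    return Cookie(0, name, value, None, False, "example.com", True, False,
                  path, True, False, None, True, None, None, {})

jar = CookieJar()
attrs = jar._cookie_attrs([v0("b", "2", "/"), v0("a", "1", "/docs/api")])
print(attrs)
# expected: ['a=1', 'b=2'] -- most specific (longest) path first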
224,695
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
CookieJar._normalized_cookie_tuples
def _normalized_cookie_tuples(self, attrs_set):
    """Return list of tuples containing normalised cookie information.

    attrs_set is the list of lists of key,value pairs extracted from
    the Set-Cookie or Set-Cookie2 headers.

    Tuples are name, value, standard, rest, where name and value are the
    cookie name and value, standard is a dictionary containing the standard
    cookie-attributes (discard, secure, version, expires or max-age,
    domain, path and port) and rest is a dictionary containing the rest of
    the cookie-attributes.

    """
    cookie_tuples = []

    boolean_attrs = "discard", "secure"
    value_attrs = ("version",
                   "expires", "max-age",
                   "domain", "path", "port",
                   "comment", "commenturl")

    for cookie_attrs in attrs_set:
        name, value = cookie_attrs[0]

        # Build dictionary of standard cookie-attributes (standard) and
        # dictionary of other cookie-attributes (rest).

        # Note: expiry time is normalised to seconds since epoch.  V0
        # cookies should have the Expires cookie-attribute, and V1 cookies
        # should have Max-Age, but since V1 includes RFC 2109 cookies (and
        # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
        # accept either (but prefer Max-Age).
        max_age_set = False

        bad_cookie = False

        standard = {}
        rest = {}
        for k, v in cookie_attrs[1:]:
            lc = k.lower()
            # don't lose case distinction for unknown fields
            if lc in value_attrs or lc in boolean_attrs:
                k = lc
            if k in boolean_attrs and v is None:
                # boolean cookie-attribute is present, but has no value
                # (like "discard", rather than "port=80")
                v = True
            if k in standard:
                # only first value is significant
                continue
            if k == "domain":
                if v is None:
                    _debug("   missing value for domain attribute")
                    bad_cookie = True
                    break
                # RFC 2965 section 3.3.3
                v = v.lower()
            if k == "expires":
                if max_age_set:
                    # Prefer max-age to expires (like Mozilla)
                    continue
                if v is None:
                    _debug("   missing or invalid value for expires "
                           "attribute: treating as session cookie")
                    continue
            if k == "max-age":
                max_age_set = True
                try:
                    v = int(v)
                except ValueError:
                    _debug("   missing or invalid (non-numeric) value for "
                           "max-age attribute")
                    bad_cookie = True
                    break
                # convert RFC 2965 Max-Age to seconds since epoch
                # XXX Strictly you're supposed to follow RFC 2616
                #   age-calculation rules.  Remember that zero Max-Age
                #   is a request to discard (old and new) cookie, though.
                k = "expires"
                v = self._now + v
            if (k in value_attrs) or (k in boolean_attrs):
                if (v is None and
                    k not in ("port", "comment", "commenturl")):
                    _debug("   missing value for %s attribute" % k)
                    bad_cookie = True
                    break
                standard[k] = v
            else:
                rest[k] = v

        if bad_cookie:
            continue

        cookie_tuples.append((name, value, standard, rest))

    return cookie_tuples
python
def _normalized_cookie_tuples(self, attrs_set):
    cookie_tuples = []

    boolean_attrs = "discard", "secure"
    value_attrs = ("version",
                   "expires", "max-age",
                   "domain", "path", "port",
                   "comment", "commenturl")

    for cookie_attrs in attrs_set:
        name, value = cookie_attrs[0]

        # Build dictionary of standard cookie-attributes (standard) and
        # dictionary of other cookie-attributes (rest).

        # Note: expiry time is normalised to seconds since epoch.  V0
        # cookies should have the Expires cookie-attribute, and V1 cookies
        # should have Max-Age, but since V1 includes RFC 2109 cookies (and
        # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
        # accept either (but prefer Max-Age).
        max_age_set = False

        bad_cookie = False

        standard = {}
        rest = {}
        for k, v in cookie_attrs[1:]:
            lc = k.lower()
            # don't lose case distinction for unknown fields
            if lc in value_attrs or lc in boolean_attrs:
                k = lc
            if k in boolean_attrs and v is None:
                # boolean cookie-attribute is present, but has no value
                # (like "discard", rather than "port=80")
                v = True
            if k in standard:
                # only first value is significant
                continue
            if k == "domain":
                if v is None:
                    _debug("   missing value for domain attribute")
                    bad_cookie = True
                    break
                # RFC 2965 section 3.3.3
                v = v.lower()
            if k == "expires":
                if max_age_set:
                    # Prefer max-age to expires (like Mozilla)
                    continue
                if v is None:
                    _debug("   missing or invalid value for expires "
                           "attribute: treating as session cookie")
                    continue
            if k == "max-age":
                max_age_set = True
                try:
                    v = int(v)
                except ValueError:
                    _debug("   missing or invalid (non-numeric) value for "
                           "max-age attribute")
                    bad_cookie = True
                    break
                # convert RFC 2965 Max-Age to seconds since epoch
                # XXX Strictly you're supposed to follow RFC 2616
                #   age-calculation rules.  Remember that zero Max-Age
                #   is a request to discard (old and new) cookie, though.
                k = "expires"
                v = self._now + v
            if (k in value_attrs) or (k in boolean_attrs):
                if (v is None and
                    k not in ("port", "comment", "commenturl")):
                    _debug("   missing value for %s attribute" % k)
                    bad_cookie = True
                    break
                standard[k] = v
            else:
                rest[k] = v

        if bad_cookie:
            continue

        cookie_tuples.append((name, value, standard, rest))

    return cookie_tuples
[ "def", "_normalized_cookie_tuples", "(", "self", ",", "attrs_set", ")", ":", "cookie_tuples", "=", "[", "]", "boolean_attrs", "=", "\"discard\"", ",", "\"secure\"", "value_attrs", "=", "(", "\"version\"", ",", "\"expires\"", ",", "\"max-age\"", ",", "\"domain\"", ...
Return list of tuples containing normalised cookie information.

attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.

Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
[ "Return", "list", "of", "tuples", "containing", "normalised", "cookie", "information", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1367-L1462
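A hedged sketch: the attrs_set input is what parse_ns_headers/split_header_words produce, and self._now must already be set (extract_cookies normally assigns it); the timestamp and header are hypothetical.

from http.cookiejar import CookieJar, parse_ns_headers

jar = CookieJar()
jar._now = 1000000000   # normally assigned by extract_cookies()
attrs_set = parse_ns_headers(["sid=31d4; Path=/; Max-Age=3600; HttpOnly"])
print(jar._normalized_cookie_tuples(attrs_set))
# roughly: [('sid', '31d4',
#            {'path': '/', 'expires': 1000003600, 'version': '0'},
#            {'HttpOnly': None})]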
224,696
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
CookieJar.make_cookies
def make_cookies(self, response, request):
    """Return sequence of Cookie objects extracted from response object."""
    # get cookie-attributes for RFC 2965 and Netscape protocols
    headers = response.info()
    rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
    ns_hdrs = headers.get_all("Set-Cookie", [])

    rfc2965 = self._policy.rfc2965
    netscape = self._policy.netscape

    if ((not rfc2965_hdrs and not ns_hdrs) or
        (not ns_hdrs and not rfc2965) or
        (not rfc2965_hdrs and not netscape) or
        (not netscape and not rfc2965)):
        return []  # no relevant cookie headers: quick exit

    try:
        cookies = self._cookies_from_attrs_set(
            split_header_words(rfc2965_hdrs), request)
    except Exception:
        _warn_unhandled_exception()
        cookies = []

    if ns_hdrs and netscape:
        try:
            # RFC 2109 and Netscape cookies
            ns_cookies = self._cookies_from_attrs_set(
                parse_ns_headers(ns_hdrs), request)
        except Exception:
            _warn_unhandled_exception()
            ns_cookies = []
        self._process_rfc2109_cookies(ns_cookies)

        # Look for Netscape cookies (from Set-Cookie headers) that match
        # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
        # For each match, keep the RFC 2965 cookie and ignore the Netscape
        # cookie (RFC 2965 section 9.1).  Actually, RFC 2109 cookies are
        # bundled in with the Netscape cookies for this purpose, which is
        # reasonable behaviour.
        if rfc2965:
            lookup = {}
            for cookie in cookies:
                lookup[(cookie.domain, cookie.path, cookie.name)] = None

            def no_matching_rfc2965(ns_cookie, lookup=lookup):
                key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
                return key not in lookup
            ns_cookies = filter(no_matching_rfc2965, ns_cookies)

        if ns_cookies:
            cookies.extend(ns_cookies)

    return cookies
python
def make_cookies(self, response, request):
    # get cookie-attributes for RFC 2965 and Netscape protocols
    headers = response.info()
    rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
    ns_hdrs = headers.get_all("Set-Cookie", [])

    rfc2965 = self._policy.rfc2965
    netscape = self._policy.netscape

    if ((not rfc2965_hdrs and not ns_hdrs) or
        (not ns_hdrs and not rfc2965) or
        (not rfc2965_hdrs and not netscape) or
        (not netscape and not rfc2965)):
        return []  # no relevant cookie headers: quick exit

    try:
        cookies = self._cookies_from_attrs_set(
            split_header_words(rfc2965_hdrs), request)
    except Exception:
        _warn_unhandled_exception()
        cookies = []

    if ns_hdrs and netscape:
        try:
            # RFC 2109 and Netscape cookies
            ns_cookies = self._cookies_from_attrs_set(
                parse_ns_headers(ns_hdrs), request)
        except Exception:
            _warn_unhandled_exception()
            ns_cookies = []
        self._process_rfc2109_cookies(ns_cookies)

        # Look for Netscape cookies (from Set-Cookie headers) that match
        # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
        # For each match, keep the RFC 2965 cookie and ignore the Netscape
        # cookie (RFC 2965 section 9.1).  Actually, RFC 2109 cookies are
        # bundled in with the Netscape cookies for this purpose, which is
        # reasonable behaviour.
        if rfc2965:
            lookup = {}
            for cookie in cookies:
                lookup[(cookie.domain, cookie.path, cookie.name)] = None

            def no_matching_rfc2965(ns_cookie, lookup=lookup):
                key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
                return key not in lookup
            ns_cookies = filter(no_matching_rfc2965, ns_cookies)

        if ns_cookies:
            cookies.extend(ns_cookies)

    return cookies
[ "def", "make_cookies", "(", "self", ",", "response", ",", "request", ")", ":", "# get cookie-attributes for RFC 2965 and Netscape protocols", "headers", "=", "response", ".", "info", "(", ")", "rfc2965_hdrs", "=", "headers", ".", "get_all", "(", "\"Set-Cookie2\"", "...
Return sequence of Cookie objects extracted from response object.
[ "Return", "sequence", "of", "Cookie", "objects", "extracted", "from", "response", "object", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1577-L1629
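A hedged sketch: make_cookies() only needs response.info() to return a message-like object with get_all(), so a stand-in response is used here. FakeResponse, the URL, the timestamp, and the header values are all hypothetical.

from email.message import Message
from urllib.request import Request
from http.cookiejar import CookieJar

class FakeResponse(object):
    # minimal stand-in for the object urlopen() would return
    def __init__(self, headers):
        self._msg = Message()
        for name, value in headers:
            self._msg[name] = value
    def info(self):
        return self._msg

jar = CookieJar()
jar._policy._now = jar._now = 1000000000   # normally set by extract_cookies()
req = Request("http://example.com/")
resp = FakeResponse([("Set-Cookie", "sid=31d4; Path=/")])
cookies = jar.make_cookies(resp, req)
print(cookies[0].name, cookies[0].value)   # sid 31d4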
224,697
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
CookieJar.set_cookie_if_ok
def set_cookie_if_ok(self, cookie, request):
    """Set a cookie if policy says it's OK to do so."""
    self._cookies_lock.acquire()
    try:
        self._policy._now = self._now = int(time.time())

        if self._policy.set_ok(cookie, request):
            self.set_cookie(cookie)

    finally:
        self._cookies_lock.release()
python
def set_cookie_if_ok(self, cookie, request):
    self._cookies_lock.acquire()
    try:
        self._policy._now = self._now = int(time.time())

        if self._policy.set_ok(cookie, request):
            self.set_cookie(cookie)

    finally:
        self._cookies_lock.release()
[ "def", "set_cookie_if_ok", "(", "self", ",", "cookie", ",", "request", ")", ":", "self", ".", "_cookies_lock", ".", "acquire", "(", ")", "try", ":", "self", ".", "_policy", ".", "_now", "=", "self", ".", "_now", "=", "int", "(", "time", ".", "time", ...
Set a cookie if policy says it's OK to do so.
[ "Set", "a", "cookie", "if", "policy", "says", "it", "s", "OK", "to", "do", "so", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1631-L1642
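A minimal sketch with hypothetical domains and values. Unlike set_cookie(), this consults the policy first, so a cookie from a blocked domain would be silently dropped.

from urllib.request import Request
from http.cookiejar import Cookie, CookieJar, DefaultCookiePolicy

jar = CookieJar(DefaultCookiePolicy(blocked_domains=["ads.example.com"]))
req = Request("http://www.example.com/")
c = Cookie(0, "sid", "31d4", None, False, "www.example.com", True, False,
           "/", True, False, None, True, None, None, {})
jar.set_cookie_if_ok(c, req)   # passes policy.set_ok(), so it is stored
print(len(jar))                # 1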
224,698
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
CookieJar.set_cookie
def set_cookie(self, cookie):
    """Set a cookie, without checking whether or not it should be set."""
    c = self._cookies
    self._cookies_lock.acquire()
    try:
        if cookie.domain not in c:
            c[cookie.domain] = {}
        c2 = c[cookie.domain]
        if cookie.path not in c2:
            c2[cookie.path] = {}
        c3 = c2[cookie.path]
        c3[cookie.name] = cookie
    finally:
        self._cookies_lock.release()
python
def set_cookie(self, cookie):
    c = self._cookies
    self._cookies_lock.acquire()
    try:
        if cookie.domain not in c:
            c[cookie.domain] = {}
        c2 = c[cookie.domain]
        if cookie.path not in c2:
            c2[cookie.path] = {}
        c3 = c2[cookie.path]
        c3[cookie.name] = cookie
    finally:
        self._cookies_lock.release()
[ "def", "set_cookie", "(", "self", ",", "cookie", ")", ":", "c", "=", "self", ".", "_cookies", "self", ".", "_cookies_lock", ".", "acquire", "(", ")", "try", ":", "if", "cookie", ".", "domain", "not", "in", "c", ":", "c", "[", "cookie", ".", "domain...
Set a cookie, without checking whether or not it should be set.
[ "Set", "a", "cookie", "without", "checking", "whether", "or", "not", "it", "should", "be", "set", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1644-L1655
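A minimal sketch of the three-level storage the method builds -- domain, then path, then name; the cookie fields are hypothetical.

from http.cookiejar import Cookie, CookieJar

jar = CookieJar()
c = Cookie(0, "sid", "31d4", None, False, "example.com", True, False,
           "/", True, False, None, True, None, None, {})
jar.set_cookie(c)   # stored with no policy check at all
print(jar._cookies["example.com"]["/"]["sid"] is c)   # True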
224,699
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
CookieJar.extract_cookies
def extract_cookies(self, response, request):
    """Extract cookies from response, where allowable given the request."""
    _debug("extract_cookies: %s", response.info())
    self._cookies_lock.acquire()
    try:
        self._policy._now = self._now = int(time.time())

        for cookie in self.make_cookies(response, request):
            if self._policy.set_ok(cookie, request):
                _debug(" setting cookie: %s", cookie)
                self.set_cookie(cookie)
    finally:
        self._cookies_lock.release()
python
def extract_cookies(self, response, request):
    _debug("extract_cookies: %s", response.info())
    self._cookies_lock.acquire()
    try:
        self._policy._now = self._now = int(time.time())

        for cookie in self.make_cookies(response, request):
            if self._policy.set_ok(cookie, request):
                _debug(" setting cookie: %s", cookie)
                self.set_cookie(cookie)
    finally:
        self._cookies_lock.release()
[ "def", "extract_cookies", "(", "self", ",", "response", ",", "request", ")", ":", "_debug", "(", "\"extract_cookies: %s\"", ",", "response", ".", "info", "(", ")", ")", "self", ".", "_cookies_lock", ".", "acquire", "(", ")", "try", ":", "self", ".", "_po...
Extract cookies from response, where allowable given the request.
[ "Extract", "cookies", "from", "response", "where", "allowable", "given", "the", "request", "." ]
c423752879acc05eebc29b0bb9909327bd5c7308
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1657-L1669
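In normal use extract_cookies() is driven by urllib's cookie processor rather than called by hand; a minimal sketch with a hypothetical opener:

import urllib.request
from http.cookiejar import CookieJar

jar = CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
# Each opener.open(url) response passes through
# jar.extract_cookies(response, request), and follow-up requests get
# jar.add_cookie_header(request) applied automatically.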