Dataset schema (column, dtype, observed value/length range):

    id                int32    0 - 252k
    repo              string   lengths 7 - 55
    path              string   lengths 4 - 127
    func_name         string   lengths 1 - 88
    original_string   string   lengths 75 - 19.8k
    language          string   1 class
    code              string   lengths 51 - 19.8k
    code_tokens       list
    docstring         string   lengths 3 - 17.3k
    docstring_tokens  list
    sha               string   lengths 40 - 40
    url               string   lengths 87 - 242
13,500
databio/pypiper
pypiper/ngstk.py
NGSTk.merge_or_link
def merge_or_link(self, input_args, raw_folder, local_base="sample"):
    """
    This function standardizes various input possibilities by converting
    either .bam, .fastq, or .fastq.gz files into a local file; merging
    those if multiple files given.

    :param list input_args: This is a list of arguments, each one is a
        class of inputs (which can in turn be a string or a list). Typically,
        input_args is a list with 2 elements: first a list of read1 files;
        second an (optional!) list of read2 files.
    :param str raw_folder: Name/path of folder for the merge/link.
    :param str local_base: Usually the sample name. This (plus file
        extension) will be the name of the local file linked (or merged)
        by this function.
    """
    self.make_sure_path_exists(raw_folder)

    if not isinstance(input_args, list):
        raise Exception("Input must be a list")

    if any(isinstance(i, list) for i in input_args):
        # We have a list of lists. Process each individually.
        local_input_files = list()
        # Count how many non-null input classes there are; wrap filter() in
        # list() so len() also works on Python 3.
        n_input_files = len(list(filter(bool, input_args)))
        print("Number of input file sets:\t\t" + str(n_input_files))

        for input_i, input_arg in enumerate(input_args):
            # We only append _R1 (etc.) if there are multiple input files.
            if n_input_files > 1:
                local_base_extended = local_base + "_R" + str(input_i + 1)
            else:
                local_base_extended = local_base
            if input_arg:
                out = self.merge_or_link(
                    input_arg, raw_folder, local_base_extended)
                print("Local input file: '{}'".format(out))
                # Make sure file exists:
                if not os.path.isfile(out):
                    print("Not a file: '{}'".format(out))
                local_input_files.append(out)
        return local_input_files
    else:
        # We have a list of individual arguments. Merge them.
        if len(input_args) == 1:
            # Only one argument in this list: a single input file. We just
            # link it, regardless of file type.
            # Pull the value out of the list.
            input_arg = input_args[0]
            input_ext = self.get_input_ext(input_arg)

            # Convert to absolute path.
            if not os.path.isabs(input_arg):
                input_arg = os.path.abspath(input_arg)

            # Link it into the raw folder.
            local_input_abs = os.path.join(raw_folder, local_base + input_ext)
            self.pm.run(
                "ln -sf " + input_arg + " " + local_input_abs,
                target=local_input_abs,
                shell=True)
            # Return the local (linked) filename, as an absolute path.
            return local_input_abs
        else:
            # Otherwise, there are multiple inputs; these are to be merged
            # if they are in bam format.
            if all([self.get_input_ext(x) == ".bam" for x in input_args]):
                sample_merged = local_base + ".merged.bam"
                output_merge = os.path.join(raw_folder, sample_merged)
                cmd = self.merge_bams(input_args, output_merge)
                self.pm.run(cmd, output_merge)
                cmd2 = self.validate_bam(output_merge)
                # Bug fix: the original re-ran `cmd` here rather than the
                # validation command it had just built.
                self.pm.run(cmd2, output_merge, nofail=True)
                return output_merge

            # If multiple fastq.gz files: gzip members can be concatenated
            # as-is, which saves a decompression/recompression round trip.
            if all([self.get_input_ext(x) == ".fastq.gz" for x in input_args]):
                sample_merged_gz = local_base + ".merged.fastq.gz"
                output_merge_gz = os.path.join(raw_folder, sample_merged_gz)
                cmd = "cat " + " ".join(input_args) + " > " + output_merge_gz
                self.pm.run(cmd, output_merge_gz)
                return output_merge_gz

            if all([self.get_input_ext(x) == ".fastq" for x in input_args]):
                sample_merged = local_base + ".merged.fastq"
                output_merge = os.path.join(raw_folder, sample_merged)
                cmd = "cat " + " ".join(input_args) + " > " + output_merge
                self.pm.run(cmd, output_merge)
                return output_merge

            # At this point, we don't recognize the input file types or they
            # do not match.
            raise NotImplementedError(
                "Input files must be of the same type, and can only "
                "merge bam or fastq.")
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L276-L381
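A hedged usage sketch for the function above; the sample and file names are hypothetical, but the setup follows pypiper's usual pattern of binding an NGSTk instance to a PipelineManager:

    import pypiper
    pm = pypiper.PipelineManager(name="demo", outfolder="out")
    tk = pypiper.NGSTk(pm=pm)

    # Two input classes (read1 and read2), each with files from two lanes.
    local_files = tk.merge_or_link(
        [["sampleA_L1_R1.fastq.gz", "sampleA_L2_R1.fastq.gz"],
         ["sampleA_L1_R2.fastq.gz", "sampleA_L2_R2.fastq.gz"]],
        raw_folder="raw",
        local_base="sampleA")
    # Each class is concatenated, yielding something like
    # ["raw/sampleA_R1.merged.fastq.gz", "raw/sampleA_R2.merged.fastq.gz"]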
13,501
databio/pypiper
pypiper/ngstk.py
NGSTk.input_to_fastq
def input_to_fastq(
        self, input_file, sample_name, paired_end, fastq_folder,
        output_file=None, multiclass=False):
    """
    Builds a command to convert input file to fastq, for various inputs.

    Takes either .bam, .fastq.gz, or .fastq input and returns commands
    that will create the .fastq file, regardless of input type. This is
    useful to make your pipeline easily accept any of these input types
    seamlessly, standardizing on fastq, which is still the most common
    format for adapter trimmers, etc. It will place the output fastq file
    in the given `fastq_folder`.

    :param str input_file: filename of input you want to convert to fastq
    :return list: A command (to be run with PipelineManager) that will
        ensure your fastq file exists, plus the fastq prefix and the
        output file name(s).
    """
    fastq_prefix = os.path.join(fastq_folder, sample_name)
    self.make_sure_path_exists(fastq_folder)

    # This expects a list; if it gets a string, convert it to a list.
    if not isinstance(input_file, list):
        input_file = [input_file]

    if len(input_file) > 1:
        cmd = []
        output_file = []
        for in_i, in_arg in enumerate(input_file):
            output = fastq_prefix + "_R" + str(in_i + 1) + ".fastq"
            result_cmd, uf, result_file = self.input_to_fastq(
                in_arg, sample_name, paired_end, fastq_folder,
                output, multiclass=True)  # uf (prefix) is unused here
            cmd.append(result_cmd)
            output_file.append(result_file)
    else:
        # There was only 1 input class; convert back into a string.
        input_file = input_file[0]
        if not output_file:
            output_file = fastq_prefix + "_R1.fastq"
        input_ext = self.get_input_ext(input_file)

        if input_ext == ".bam":
            print("Found .bam file")
            cmd, fq1, fq2 = self.bam_to_fastq_awk(
                input_file, fastq_prefix, paired_end)
        elif input_ext == ".fastq.gz":
            print("Found .fastq.gz file")
            if paired_end and not multiclass:
                # For paired-end reads in one fastq file, we must split the
                # file into 2.
                script_path = os.path.join(
                    self.tools.scripts_dir, "fastq_split.py")
                cmd = self.tools.python + " -u " + script_path
                cmd += " -i " + input_file
                cmd += " -o " + fastq_prefix
                # Must also return the set of output files.
                output_file = [fastq_prefix + "_R1.fastq",
                               fastq_prefix + "_R2.fastq"]
            else:
                # For single-end reads (or paired-end reads that were
                # already split), we just unzip the fastq.gz file.
                cmd = self.ziptool + " -d -c " + input_file + " > " + output_file
        elif input_ext == ".fastq":
            cmd = "ln -sf " + input_file + " " + output_file
            print("Found .fastq file; no conversion necessary")

    return [cmd, fastq_prefix, output_file]
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L384-L457
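Continuing the hypothetical setup from the merge_or_link sketch above (`tk` bound to a PipelineManager), the returned command is typically handed to pm.run, with check_fastq (next record) as the follow-up sanity check:

    cmd, fq_prefix, out_files = tk.input_to_fastq(
        "sampleA.bam", "sampleA", paired_end=True, fastq_folder="fastq")
    tk.pm.run(cmd, target=out_files,
              follow=tk.check_fastq("sampleA.bam", out_files, paired_end=True))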
13,502
databio/pypiper
pypiper/ngstk.py
NGSTk.check_fastq
def check_fastq(self, input_files, output_files, paired_end):
    """
    Returns a follow sanity-check function to be run after a fastq
    conversion.

    Run following a command that will produce the fastq files.
    This function will make sure any input files have the same number of
    reads as the output files.
    """
    # Define a temporary function which we will return, to be called by
    # the pipeline. Default parameters must be defined here, based on the
    # parameters passed in; this locks the values in place, so that they
    # are defined when the function is later called without arguments as
    # a follow function by pm.run.

    # This is AFTER merge, so if there are multiple files it means the
    # files were split into read1/read2; therefore we must divide by the
    # number of files for final reads.
    def temp_func(input_files=input_files, output_files=output_files,
                  paired_end=paired_end):
        if not isinstance(input_files, list):
            input_files = [input_files]
        if not isinstance(output_files, list):
            output_files = [output_files]
        print(input_files)
        print(output_files)

        # Wrap filter() in list() so len() also works on Python 3.
        n_input_files = len(list(filter(bool, input_files)))

        total_reads = sum([int(self.count_reads(input_file, paired_end))
                           for input_file in input_files])
        raw_reads = total_reads // n_input_files  # integer division
        self.pm.report_result("Raw_reads", str(raw_reads))

        total_fastq_reads = sum(
            [int(self.count_reads(output_file, paired_end))
             for output_file in output_files])
        fastq_reads = total_fastq_reads // n_input_files
        self.pm.report_result("Fastq_reads", fastq_reads)

        input_ext = self.get_input_ext(input_files[0])
        # We can only assess pass-filter reads in bam files with flags.
        if input_ext == ".bam":
            num_failed_filter = sum(
                [int(self.count_fail_reads(f, paired_end))
                 for f in input_files])
            pf_reads = int(raw_reads) - num_failed_filter
            self.pm.report_result("PF_reads", str(pf_reads))
        if fastq_reads != int(raw_reads):
            raise Exception("Fastq conversion error? Number of reads "
                            "doesn't match unaligned bam")
        return fastq_reads

    return temp_func
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L460-L516
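The default-argument trick in temp_func freezes the current values at definition time, so pm.run can later invoke the closure with no arguments. A minimal standalone illustration of the pattern:

    def make_checker(expected):
        def check(expected=expected):  # bound now, not at call time
            return expected * 2
        return check

    checker = make_checker(5)
    print(checker())  # -> 10, even though no argument is passed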
13,503
databio/pypiper
pypiper/ngstk.py
NGSTk.check_trim
def check_trim(self, trimmed_fastq, paired_end, trimmed_fastq_R2=None,
               fastqc_folder=None):
    """
    Build function to evaluate read trimming, and optionally run fastqc.

    This is useful to construct an argument for the 'follow' parameter of
    a PipelineManager's 'run' method.

    :param str trimmed_fastq: Path to trimmed reads file.
    :param bool paired_end: Whether the processing is being done with
        paired-end sequencing data.
    :param str trimmed_fastq_R2: Path to read 2 file for the paired-end case.
    :param str fastqc_folder: Path to folder within which to place fastqc
        output files; if unspecified, fastqc will not be run.
    :return callable: Function to evaluate read trimming and possibly run
        fastqc.
    """
    def temp_func():
        print("Evaluating read trimming")

        if paired_end and not trimmed_fastq_R2:
            print("WARNING: specified paired-end but no R2 file")

        n_trim = float(self.count_reads(trimmed_fastq, paired_end))
        self.pm.report_result("Trimmed_reads", int(n_trim))
        try:
            rr = float(self.pm.get_stat("Raw_reads"))
        except Exception:  # narrowed from a bare except
            print("Can't calculate trim loss rate without raw read result.")
        else:
            self.pm.report_result(
                "Trim_loss_rate", round((rr - n_trim) * 100 / rr, 2))

        # Also run fastqc (if installed/requested).
        if fastqc_folder:
            # The original re-tested `fastqc_folder` here as well; that
            # inner check was redundant and has been dropped.
            if os.path.isabs(fastqc_folder):
                self.make_sure_path_exists(fastqc_folder)
            cmd = self.fastqc(trimmed_fastq, fastqc_folder)
            self.pm.run(cmd, lock_name="trimmed_fastqc", nofail=True)
            fname, ext = os.path.splitext(os.path.basename(trimmed_fastq))
            fastqc_html = os.path.join(fastqc_folder, fname + "_fastqc.html")
            self.pm.report_object("FastQC report r1", fastqc_html)

            if paired_end and trimmed_fastq_R2:
                cmd = self.fastqc(trimmed_fastq_R2, fastqc_folder)
                self.pm.run(cmd, lock_name="trimmed_fastqc_R2", nofail=True)
                fname, ext = os.path.splitext(
                    os.path.basename(trimmed_fastq_R2))
                fastqc_html = os.path.join(
                    fastqc_folder, fname + "_fastqc.html")
                self.pm.report_object("FastQC report r2", fastqc_html)

    return temp_func
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L519-L570
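A sketch of the intended wiring, with a hypothetical trimmer command string (`tk` as in the earlier sketches); the returned function runs only after the trimming command completes:

    trim_cmd = "some_trimmer -i fastq/sampleA_R1.fastq -o fastq/sampleA_R1.trim.fastq"
    tk.pm.run(trim_cmd, target="fastq/sampleA_R1.trim.fastq",
              follow=tk.check_trim("fastq/sampleA_R1.trim.fastq",
                                   paired_end=False, fastqc_folder="fastqc"))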
13,504
databio/pypiper
pypiper/ngstk.py
NGSTk.validate_bam
def validate_bam(self, input_bam):
    """
    Wrapper for Picard's ValidateSamFile.

    :param str input_bam: Path to file to validate.
    :return str: Command to run for the validation.
    """
    cmd = self.tools.java + " -Xmx" + self.pm.javamem
    cmd += " -jar " + self.tools.picard + " ValidateSamFile"
    cmd += " INPUT=" + input_bam
    return cmd
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L573-L583
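For concreteness, with illustrative tool paths and a 4g Java heap, the string this returns would look like:

    java -Xmx4g -jar /path/to/picard.jar ValidateSamFile INPUT=sampleA.bam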
13,505
databio/pypiper
pypiper/ngstk.py
NGSTk.merge_bams
def merge_bams(self, input_bams, merged_bam, in_sorted="TRUE", tmp_dir=None):
    """
    Combine multiple files into one.

    The tmp_dir parameter is important because on poorly configured
    systems, the default can sometimes fill up.

    :param Iterable[str] input_bams: Paths to files to combine
    :param str merged_bam: Path to which to write combined result.
    :param bool | str in_sorted: Whether the inputs are sorted
    :param str tmp_dir: Path to temporary directory.
    """
    if not len(input_bams) > 1:
        print("No merge required")
        return 0

    outdir, _ = os.path.split(merged_bam)
    if outdir and not os.path.exists(outdir):
        print("Creating path to merge file's folder: '{}'".format(outdir))
        os.makedirs(outdir)

    # Handle more intuitive boolean argument.
    if in_sorted in [False, True]:
        in_sorted = "TRUE" if in_sorted else "FALSE"

    input_string = " INPUT=" + " INPUT=".join(input_bams)
    cmd = self.tools.java + " -Xmx" + self.pm.javamem
    cmd += " -jar " + self.tools.picard + " MergeSamFiles"
    cmd += input_string
    cmd += " OUTPUT=" + merged_bam
    cmd += " ASSUME_SORTED=" + str(in_sorted)
    cmd += " CREATE_INDEX=TRUE"
    cmd += " VALIDATION_STRINGENCY=SILENT"
    if tmp_dir:
        cmd += " TMP_DIR=" + tmp_dir
    return cmd
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L586-L621
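With two hypothetical inputs, the Picard command this builds looks roughly like:

    java -Xmx4g -jar /path/to/picard.jar MergeSamFiles INPUT=a.bam INPUT=b.bam OUTPUT=raw/sampleA.merged.bam ASSUME_SORTED=TRUE CREATE_INDEX=TRUE VALIDATION_STRINGENCY=SILENT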
13,506
databio/pypiper
pypiper/ngstk.py
NGSTk.count_lines
def count_lines(self, file_name):
    """
    Uses the command-line utility wc to count the number of lines in a file.

    For MacOS, must strip leading whitespace from wc.

    :param str file_name: name of file whose lines are to be counted
    """
    x = subprocess.check_output(
        "wc -l " + file_name + " | sed -E 's/^[[:space:]]+//' | cut -f1 -d' '",
        shell=True)
    return x.decode().strip()  # decode: check_output returns bytes on Python 3
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L649-L656
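The sed step matters because BSD wc (macOS) left-pads its count while GNU wc does not; after the sed/cut pipeline both forms normalize to a bare number:

    # macOS: "    4000 reads.fastq"  ->  "4000"
    # Linux: "4000 reads.fastq"      ->  "4000"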
13,507
databio/pypiper
pypiper/ngstk.py
NGSTk.get_chrs_from_bam
def get_chrs_from_bam(self, file_name):
    """
    Uses samtools to grab the chromosomes from the header that are
    contained in this bam file.
    """
    x = subprocess.check_output(
        self.tools.samtools + " view -H " + file_name +
        " | grep '^@SQ' | cut -f2 | sed 's/SN://'",
        shell=True)
    # Chromosomes will be separated by newlines; split into a list to return.
    return x.decode().split()  # decode: check_output returns bytes on Python 3
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L667-L674
13,508
databio/pypiper
pypiper/ngstk.py
NGSTk.count_unique_mapped_reads
def count_unique_mapped_reads(self, file_name, paired_end):
    """
    For a bam or sam file with paired- or single-end reads, returns the
    number of mapped reads, counting each read only once, even if it
    appears mapped at multiple locations.

    :param str file_name: name of reads file
    :param bool paired_end: True/False paired end data
    :return int: Number of uniquely mapped reads.
    """
    _, ext = os.path.splitext(file_name)
    ext = ext.lower()

    if ext == ".sam":
        param = "-S -F4"
    elif ext == ".bam":  # was "bam"; splitext keeps the dot, so bams never matched
        param = "-F4"
    else:
        raise ValueError("Not a SAM or BAM: '{}'".format(file_name))

    postpend = " | cut -f1 | sort -k1,1 -u | wc -l | sed -E 's/^[[:space:]]+//'"
    if paired_end:
        r1 = self.samtools_view(file_name, param=param + " -f64",
                                postpend=postpend)
        r2 = self.samtools_view(file_name, param=param + " -f128",
                                postpend=postpend)
    else:
        r1 = self.samtools_view(file_name, param=param, postpend=postpend)
        r2 = 0
    return int(r1) + int(r2)
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L704-L732
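The samtools flags used above come straight from the SAM specification's bit field:

    # -F4    exclude flag 0x4 (read unmapped)
    # -f64   require flag 0x40 (first segment, i.e. read 1)
    # -f128  require flag 0x80 (last segment, i.e. read 2)
    # Deduplicating read names with sort -u before counting collapses
    # multi-mapping reads, so each mate is counted at most once.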
13,509
databio/pypiper
pypiper/ngstk.py
NGSTk.count_flag_reads
def count_flag_reads(self, file_name, flag, paired_end):
    """
    Counts the number of reads with the specified flag.

    :param str file_name: name of reads file
    :param str flag: sam flag value to be read
    :param bool paired_end: This parameter is ignored; samtools
        automatically responds correctly depending on the data in the
        bamfile. We leave the option here just for consistency, since all
        the other counting functions require the parameter. This makes it
        easier to swap counting functions during pipeline development.
    """
    param = " -c -f" + str(flag)
    if file_name.endswith("sam"):
        param += " -S"
    return self.samtools_view(file_name, param=param)
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L735-L750
13,510
databio/pypiper
pypiper/ngstk.py
NGSTk.count_uniquelymapping_reads
def count_uniquelymapping_reads(self, file_name, paired_end):
    """
    Counts the number of reads that mapped to a unique position.

    :param str file_name: name of reads file
    :param bool paired_end: This parameter is ignored.
    """
    param = " -c -F256"
    if file_name.endswith("sam"):
        param += " -S"
    return self.samtools_view(file_name, param=param)
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L769-L779
13,511
databio/pypiper
pypiper/ngstk.py
NGSTk.samtools_view
def samtools_view(self, file_name, param, postpend=""):
    """
    Run samtools view, with flexible parameters and post-processing.

    This is used internally to implement the various count_reads functions.

    :param str file_name: file_name
    :param str param: String of parameters to pass to samtools view
    :param str postpend: String to append to the samtools command;
        useful to add cut, sort, wc operations to the samtools view output.
    """
    cmd = "{} view {} {} {}".format(
        self.tools.samtools, param, file_name, postpend)
    # decode: check_output returns bytes on Python 3, and callers int() the result
    return subprocess.check_output(cmd, shell=True).decode()
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L793-L806
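Because this helper just interpolates its arguments into a shell pipeline, the counting functions above reduce to thin wrappers around it. A hedged example (hypothetical file name, `tk` as in the earlier sketches):

    mapped = int(tk.samtools_view("sampleA.bam", param="-c -F4"))
    first_mate_names = tk.samtools_view(
        "sampleA.bam", param="-f64", postpend=" | cut -f1 | head")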
13,512
databio/pypiper
pypiper/ngstk.py
NGSTk.count_reads
def count_reads(self, file_name, paired_end):
    """
    Count reads in a file.

    Paired-end reads count as 2 in this function.
    For paired-end reads, this function assumes that the reads are split
    into 2 files, so it divides line count by 2 instead of 4.
    This will thus give an incorrect result if your paired-end fastq
    files are in only a single file (you must divide by 2 again).

    :param str file_name: Name/path of file whose reads are to be counted.
    :param bool paired_end: Whether the file contains paired-end reads.
    """
    _, ext = os.path.splitext(file_name)
    if not (is_sam_or_bam(file_name) or is_fastq(file_name)):
        # TODO: make this an exception and force caller to handle that
        # rather than relying on knowledge of possibility of negative value.
        return -1
    if is_sam_or_bam(file_name):
        param_text = "-c" if ext == ".bam" else "-c -S"
        return self.samtools_view(file_name, param=param_text)
    else:
        if is_gzipped_fastq(file_name):
            num_lines = self.count_lines_zip(file_name)
        else:
            num_lines = self.count_lines(file_name)
        divisor = 2 if paired_end else 4
        return int(num_lines) // divisor  # floor division keeps an int on Python 3
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L809-L837
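The divisor logic as a worked example: a fastq record is 4 lines, so a single-end file with 4,000 lines holds 1,000 reads. For paired-end data split across two files, counting 4,000 lines in one mate file should report 2,000 reads (both mates), hence the divisor of 2:

    # single-end:              4000 lines // 4 = 1000 reads
    # paired-end, split files: 4000 lines // 2 = 2000 reads (both mates)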
13,513
databio/pypiper
pypiper/ngstk.py
NGSTk.count_concordant
def count_concordant(self, aligned_bam):
    """
    Count only reads that "aligned concordantly exactly 1 time."

    :param str aligned_bam: File for which to count mapped reads.
    """
    # YT:Z:CP is the tag bowtie2 assigns to reads aligned as a concordant pair.
    cmd = self.tools.samtools + " view " + aligned_bam + " | "
    cmd += "grep 'YT:Z:CP' | uniq -u | wc -l | sed -E 's/^[[:space:]]+//'"
    return subprocess.check_output(cmd, shell=True).decode()
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L840-L849
13,514
databio/pypiper
pypiper/ngstk.py
NGSTk.count_mapped_reads
def count_mapped_reads(self, file_name, paired_end):
    """
    Count mapped reads. The input is not in fastq format, so this
    function doesn't need to accommodate fastq and only uses samtools
    view; the paired-end parameter is accepted and then discarded purely
    for interface consistency.

    :param str file_name: File for which to count mapped reads.
    :param bool paired_end: This parameter is ignored; samtools
        automatically responds correctly depending on the data in the
        bamfile. We leave the option here just for consistency, since all
        the other counting functions require the parameter. This makes it
        easier to swap counting functions during pipeline development.
    :return int: Either the read count output by the samtools view
        command, or -1 to indicate an error state.
    """
    if file_name.endswith("bam"):
        return self.samtools_view(file_name, param="-c -F4")
    if file_name.endswith("sam"):
        return self.samtools_view(file_name, param="-c -F4 -S")
    return -1
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L852-L869
13,515
databio/pypiper
pypiper/ngstk.py
NGSTk.sam_conversions
def sam_conversions(self, sam_file, depth=True):
    """
    Convert sam files to bam files, then sort and index them for later use.

    :param bool depth: also calculate coverage over each position
    """
    cmd = (self.tools.samtools + " view -bS " + sam_file + " > " +
           sam_file.replace(".sam", ".bam") + "\n")
    cmd += (self.tools.samtools + " sort " + sam_file.replace(".sam", ".bam") +
            " -o " + sam_file.replace(".sam", "_sorted.bam") + "\n")
    cmd += (self.tools.samtools + " index " +
            sam_file.replace(".sam", "_sorted.bam") + "\n")
    if depth:
        cmd += (self.tools.samtools + " depth " +
                sam_file.replace(".sam", "_sorted.bam") + " > " +
                sam_file.replace(".sam", "_sorted.depth") + "\n")
    return cmd
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L872-L883
13,516
databio/pypiper
pypiper/ngstk.py
NGSTk.bam_conversions
def bam_conversions(self, bam_file, depth=True):
    """
    Sort and index bam files for later use.

    :param bool depth: also calculate coverage over each position
    """
    cmd = (self.tools.samtools + " view -h " + bam_file + " > " +
           bam_file.replace(".bam", ".sam") + "\n")
    cmd += (self.tools.samtools + " sort " + bam_file + " -o " +
            bam_file.replace(".bam", "_sorted.bam") + "\n")
    cmd += (self.tools.samtools + " index " +
            bam_file.replace(".bam", "_sorted.bam") + "\n")
    if depth:
        cmd += (self.tools.samtools + " depth " +
                bam_file.replace(".bam", "_sorted.bam") + " > " +
                bam_file.replace(".bam", "_sorted.depth") + "\n")
    return cmd
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L886-L897
13,517
databio/pypiper
pypiper/ngstk.py
NGSTk.fastqc
def fastqc(self, file, output_dir):
    """
    Create command to run fastqc on a FASTQ file

    :param str file: Path to file with sequencing reads
    :param str output_dir: Path to folder in which to place output
    :return str: Command with which to run fastqc
    """
    # You can find the fastqc help with fastqc --help
    try:
        pm = self.pm
    except AttributeError:
        # Do nothing; this block is just for path construction.
        pass
    else:
        if not os.path.isabs(output_dir) and pm is not None:
            output_dir = os.path.join(pm.outfolder, output_dir)
    self.make_sure_path_exists(output_dir)
    return "{} --noextract --outdir {} {}".format(
        self.tools.fastqc, output_dir, file)
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L900-L919
13,518
databio/pypiper
pypiper/ngstk.py
NGSTk.fastqc_rename
def fastqc_rename(self, input_bam, output_dir, sample_name):
    """
    Create pair of commands to run fastqc and organize files.

    The first command returned is the one that actually runs fastqc when
    it's executed; the second moves the output files to the output folder
    for the sample indicated.

    :param str input_bam: Path to file for which to run fastqc.
    :param str output_dir: Path to folder in which fastqc output will be
        written, and within which the sample's output folder lives.
    :param str sample_name: Sample name, which determines subfolder within
        output_dir for the fastqc files.
    :return list[str]: Pair of commands, to run fastqc and then move the
        files to their intended destination based on sample name.
    """
    cmds = list()
    initial = os.path.splitext(os.path.basename(input_bam))[0]
    cmd1 = self.fastqc(input_bam, output_dir)
    cmds.append(cmd1)
    cmd2 = ("if [[ ! -s {1}_fastqc.html ]]; then mv {0}_fastqc.html "
            "{1}_fastqc.html; mv {0}_fastqc.zip {1}_fastqc.zip; fi").format(
        os.path.join(output_dir, initial),
        os.path.join(output_dir, sample_name))
    cmds.append(cmd2)
    return cmds
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L922-L945
13,519
databio/pypiper
pypiper/ngstk.py
NGSTk.samtools_index
def samtools_index(self, bam_file):
    """Index a bam file."""
    cmd = self.tools.samtools + " index {0}".format(bam_file)
    return cmd
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L948-L951
13,520
databio/pypiper
pypiper/ngstk.py
NGSTk.skewer
def skewer(
        self, input_fastq1, output_prefix, output_fastq1, log, cpus,
        adapters, input_fastq2=None, output_fastq2=None):
    """
    Create commands with which to run skewer.

    :param str input_fastq1: Path to input (read 1) FASTQ file
    :param str output_prefix: Prefix for output FASTQ file names
    :param str output_fastq1: Path to (read 1) output FASTQ file
    :param str log: Path to file to which to write logging information
    :param int | str cpus: Number of processing cores to allow
    :param str adapters: Path to file with sequencing adapters
    :param str input_fastq2: Path to read 2 input FASTQ file
    :param str output_fastq2: Path to read 2 output FASTQ file
    :return list[str]: Sequence of commands to run to trim reads with
        skewer and rename files as desired.
    """
    pe = input_fastq2 is not None
    mode = "pe" if pe else "any"
    cmds = list()

    cmd1 = self.tools.skewer + " --quiet"
    cmd1 += " -f sanger"
    cmd1 += " -t {0}".format(cpus)
    cmd1 += " -m {0}".format(mode)
    cmd1 += " -x {0}".format(adapters)
    cmd1 += " -o {0}".format(output_prefix)
    cmd1 += " {0}".format(input_fastq1)
    if pe:
        cmd1 += " {0}".format(input_fastq2)
    cmds.append(cmd1)

    if not pe:
        cmd2 = "mv {0} {1}".format(
            output_prefix + "-trimmed.fastq", output_fastq1)
        cmds.append(cmd2)
    else:
        cmd2 = "mv {0} {1}".format(
            output_prefix + "-trimmed-pair1.fastq", output_fastq1)
        cmds.append(cmd2)
        cmd3 = "mv {0} {1}".format(
            output_prefix + "-trimmed-pair2.fastq", output_fastq2)
        cmds.append(cmd3)

    cmd4 = "mv {0} {1}".format(output_prefix + "-trimmed.log", log)
    cmds.append(cmd4)
    return cmds
python
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1039-L1082
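A minimal usage sketch for the skewer builder above. Nothing runs until the returned strings are handed to the manager; file names are hypothetical, and the PipelineManager/NGSTk construction follows the usual pypiper pattern but is an assumption in this context:

import pypiper

pm = pypiper.PipelineManager(name="demo", outfolder="pipeline_output")  # assumed setup
tk = pypiper.NGSTk(pm=pm)

# Build the paired-end trim + rename commands (a list of shell strings).
cmds = tk.skewer(
    input_fastq1="sample_R1.fastq", output_prefix="sample",
    output_fastq1="sample_R1.trim.fastq", log="sample.skewer.log",
    cpus=4, adapters="adapters.fa",
    input_fastq2="sample_R2.fastq", output_fastq2="sample_R2.trim.fastq")
pm.run(cmds, target="sample_R1.trim.fastq")  # pm.run accepts a list of commands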
13,521
databio/pypiper
pypiper/ngstk.py
NGSTk.filter_reads
def filter_reads(self, input_bam, output_bam, metrics_file, paired=False, cpus=16, Q=30): """ Remove duplicates, filter for >Q, remove multiple mapping reads. For paired-end reads, keep only proper pairs. """ nodups = re.sub(r"\.bam$", "", output_bam) + ".nodups.nofilter.bam" cmd1 = self.tools.sambamba + " markdup -t {0} -r --compression-level=0 {1} {2} 2> {3}".format(cpus, input_bam, nodups, metrics_file) cmd2 = self.tools.sambamba + ' view -t {0} -f bam --valid'.format(cpus) if paired: cmd2 += ' -F "not (unmapped or mate_is_unmapped) and proper_pair' else: cmd2 += ' -F "not unmapped' cmd2 += ' and not (secondary_alignment or supplementary) and mapping_quality >= {0}"'.format(Q) cmd2 += ' {0} |'.format(nodups) cmd2 += self.tools.sambamba + " sort -t {0} /dev/stdin -o {1}".format(cpus, output_bam) cmd3 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups) cmd4 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups + ".bai") return [cmd1, cmd2, cmd3, cmd4]
python
def filter_reads(self, input_bam, output_bam, metrics_file, paired=False, cpus=16, Q=30): nodups = re.sub(r"\.bam$", "", output_bam) + ".nodups.nofilter.bam" cmd1 = self.tools.sambamba + " markdup -t {0} -r --compression-level=0 {1} {2} 2> {3}".format(cpus, input_bam, nodups, metrics_file) cmd2 = self.tools.sambamba + ' view -t {0} -f bam --valid'.format(cpus) if paired: cmd2 += ' -F "not (unmapped or mate_is_unmapped) and proper_pair' else: cmd2 += ' -F "not unmapped' cmd2 += ' and not (secondary_alignment or supplementary) and mapping_quality >= {0}"'.format(Q) cmd2 += ' {0} |'.format(nodups) cmd2 += self.tools.sambamba + " sort -t {0} /dev/stdin -o {1}".format(cpus, output_bam) cmd3 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups) cmd4 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups + ".bai") return [cmd1, cmd2, cmd3, cmd4]
[ "def", "filter_reads", "(", "self", ",", "input_bam", ",", "output_bam", ",", "metrics_file", ",", "paired", "=", "False", ",", "cpus", "=", "16", ",", "Q", "=", "30", ")", ":", "nodups", "=", "re", ".", "sub", "(", "\"\\.bam$\"", ",", "\"\"", ",", ...
Remove duplicates, filter for >Q, remove multiple mapping reads. For paired-end reads, keep only proper pairs.
[ "Remove", "duplicates", "filter", "for", ">", "Q", "remove", "multiple", "mapping", "reads", ".", "For", "paired", "-", "end", "reads", "keep", "only", "proper", "pairs", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1135-L1152
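The record above likewise returns commands rather than executing them; the final two are shell conditionals that remove the intermediate no-duplicates BAM and its index. A hedged sketch (hypothetical paths; assumes the `pm`/`tk` setup from the earlier skewer sketch, with sambamba configured in tk.tools):

cmds = tk.filter_reads(
    "sample.bam", "sample.filtered.bam", "sample.dedup_metrics.txt",
    paired=True, cpus=8, Q=30)
# The second command contains a pipe into 'sambamba sort', so it needs a shell.
pm.run(cmds, target="sample.filtered.bam")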
13,522
databio/pypiper
pypiper/ngstk.py
NGSTk.run_spp
def run_spp(self, input_bam, output, plot, cpus): """ Run the SPP read peak analysis tool. :param str input_bam: Path to reads file :param str output: Path to output file :param str plot: Path to plot file :param int cpus: Number of processors to use :return str: Command with which to run SPP """ base = "{} {} -rf".format(self.tools.Rscript, self.tools.spp) cmd = base + " -savp={} -s=0:5:500 -c={} -out={} -p={}".format( plot, input_bam, output, cpus) return cmd
python
def run_spp(self, input_bam, output, plot, cpus): base = "{} {} -rf".format(self.tools.Rscript, self.tools.spp) cmd = base + " -savp={} -s=0:5:500 -c={} -out={} -p={}".format( plot, input_bam, output, cpus) return cmd
[ "def", "run_spp", "(", "self", ",", "input_bam", ",", "output", ",", "plot", ",", "cpus", ")", ":", "base", "=", "\"{} {} -rf -savp\"", ".", "format", "(", "self", ".", "tools", ".", "Rscript", ",", "self", ".", "tools", ".", "spp", ")", "cmd", "=", ...
Run the SPP read peak analysis tool. :param str input_bam: Path to reads file :param str output: Path to output file :param str plot: Path to plot file :param int cpus: Number of processors to use :return str: Command with which to run SPP
[ "Run", "the", "SPP", "read", "peak", "analysis", "tool", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1176-L1189
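A sketch of driving the SPP command builder above (hypothetical paths; assumes the `pm`/`tk` setup from the first sketch, with Rscript and the SPP script configured in tk.tools):

cmd = tk.run_spp("sample.filtered.bam", "sample.spp.out", "sample.spp.pdf", cpus=4)
pm.run(cmd, target="sample.spp.out", nofail=True)  # QC step; nofail keeps a failure from killing the run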
13,523
databio/pypiper
pypiper/ngstk.py
NGSTk.calc_frip
def calc_frip(self, input_bam, input_bed, threads=4): """ Calculate fraction of reads in peaks. A file with a pool of sequencing reads and a file with peak call regions define the operation that will be performed. Thread count for samtools can be specified as well. :param str input_bam: sequencing reads file :param str input_bed: file with called peak regions :param int threads: number of threads samtools may use :return float: fraction of reads in peaks defined in given peaks file """ cmd = self.simple_frip(input_bam, input_bed, threads) return subprocess.check_output(cmd, shell=True)
python
def calc_frip(self, input_bam, input_bed, threads=4): cmd = self.simple_frip(input_bam, input_bed, threads) return subprocess.check_output(cmd, shell=True)
[ "def", "calc_frip", "(", "self", ",", "input_bam", ",", "input_bed", ",", "threads", "=", "4", ")", ":", "cmd", "=", "self", ".", "simple_frip", "(", "input_bam", ",", "input_bed", ",", "threads", ")", "return", "subprocess", ".", "check_output", "(", "c...
Calculate fraction of reads in peaks. A file with a pool of sequencing reads and a file with peak call regions define the operation that will be performed. Thread count for samtools can be specified as well. :param str input_bam: sequencing reads file :param str input_bed: file with called peak regions :param int threads: number of threads samtools may use :return float: fraction of reads in peaks defined in given peaks file
[ "Calculate", "fraction", "of", "reads", "in", "peaks", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1426-L1440
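Unlike the command builders around it, calc_frip executes immediately via subprocess and returns the captured stdout, which the caller still has to parse. A hedged sketch (hypothetical paths; `tk` as in the first sketch):

raw = tk.calc_frip("sample.filtered.bam", "sample_peaks.narrowPeak", threads=4)
frip = float(raw.strip())  # check_output yields captured stdout, not the float the docstring promises
print("FRiP: {:.3f}".format(frip))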
13,524
databio/pypiper
pypiper/ngstk.py
NGSTk.macs2_call_peaks
def macs2_call_peaks( self, treatment_bams, output_dir, sample_name, genome, control_bams=None, broad=False, paired=False, pvalue=None, qvalue=None, include_significance=None): """ Use MACS2 to call peaks. :param str | Iterable[str] treatment_bams: Paths to files with data to regard as treatment. :param str output_dir: Path to output folder. :param str sample_name: Name for the sample involved. :param str genome: Name of the genome assembly to use. :param str | Iterable[str] control_bams: Paths to files with data to regard as control :param bool broad: Whether to do broad peak calling. :param bool paired: Whether reads are paired-end :param float | NoneType pvalue: Statistical significance measure to pass as --pvalue to peak calling with MACS :param float | NoneType qvalue: Statistical significance measure to pass as --qvalue to peak calling with MACS :param bool | NoneType include_significance: Whether to pass a statistical significance argument to peak calling with MACS; if omitted, this will be True exactly when broad peak calling is used; default significance specification is a p-value of 0.00001 if a significance is to be specified but no value is provided for p-value or q-value. :return str: Command to run. """ sizes = {"hg38": 2.7e9, "hg19": 2.7e9, "mm10": 1.87e9, "dr7": 1.412e9, "mm9": 1.87e9} # Whether to pass MACS2 a value for statistical significance can be # directly indicated; if not, it's determined by whether the mark is # associated with broad peaks. By default, we specify a significance # value to MACS2 for a mark associated with a broad peak. if include_significance is None: include_significance = broad cmd = self.tools.macs2 + " callpeak -t {0}".format(treatment_bams if type(treatment_bams) is str else " ".join(treatment_bams)) if control_bams is not None: cmd += " -c {0}".format(control_bams if type(control_bams) is str else " ".join(control_bams)) if paired: cmd += " -f BAMPE " # Additional settings based on whether the mark is associated with # broad peaks if broad: cmd += " --broad --nomodel --extsize 73" else: cmd += " --fix-bimodal --extsize 180 --bw 200" if include_significance: # Allow significance specification via either p- or q-value, # giving preference to q-value if both are provided but falling # back on a default p-value if neither is provided but inclusion # of statistical significance measure is desired. if qvalue is not None: cmd += " --qvalue {}".format(qvalue) else: cmd += " --pvalue {}".format(pvalue or 0.00001) cmd += " -g {0} -n {1} --outdir {2}".format(sizes[genome], sample_name, output_dir) return cmd
python
def macs2_call_peaks( self, treatment_bams, output_dir, sample_name, genome, control_bams=None, broad=False, paired=False, pvalue=None, qvalue=None, include_significance=None): sizes = {"hg38": 2.7e9, "hg19": 2.7e9, "mm10": 1.87e9, "dr7": 1.412e9, "mm9": 1.87e9} # Whether to pass MACS2 a value for statistical significance can be # directly indicated; if not, it's determined by whether the mark is # associated with broad peaks. By default, we specify a significance # value to MACS2 for a mark associated with a broad peak. if include_significance is None: include_significance = broad cmd = self.tools.macs2 + " callpeak -t {0}".format(treatment_bams if type(treatment_bams) is str else " ".join(treatment_bams)) if control_bams is not None: cmd += " -c {0}".format(control_bams if type(control_bams) is str else " ".join(control_bams)) if paired: cmd += " -f BAMPE " # Additional settings based on whether the mark is associated with # broad peaks if broad: cmd += " --broad --nomodel --extsize 73" else: cmd += " --fix-bimodal --extsize 180 --bw 200" if include_significance: # Allow significance specification via either p- or q-value, # giving preference to q-value if both are provided but falling # back on a default p-value if neither is provided but inclusion # of statistical significance measure is desired. if qvalue is not None: cmd += " --qvalue {}".format(qvalue) else: cmd += " --pvalue {}".format(pvalue or 0.00001) cmd += " -g {0} -n {1} --outdir {2}".format(sizes[genome], sample_name, output_dir) return cmd
[ "def", "macs2_call_peaks", "(", "self", ",", "treatment_bams", ",", "output_dir", ",", "sample_name", ",", "genome", ",", "control_bams", "=", "None", ",", "broad", "=", "False", ",", "paired", "=", "False", ",", "pvalue", "=", "None", ",", "qvalue", "=", ...
Use MACS2 to call peaks. :param str | Iterable[str] treatment_bams: Paths to files with data to regard as treatment. :param str output_dir: Path to output folder. :param str sample_name: Name for the sample involved. :param str genome: Name of the genome assembly to use. :param str | Iterable[str] control_bams: Paths to files with data to regard as control :param bool broad: Whether to do broad peak calling. :param bool paired: Whether reads are paired-end :param float | NoneType pvalue: Statistical significance measure to pass as --pvalue to peak calling with MACS :param float | NoneType qvalue: Statistical significance measure to pass as --qvalue to peak calling with MACS :param bool | NoneType include_significance: Whether to pass a statistical significance argument to peak calling with MACS; if omitted, this will be True exactly when broad peak calling is used; default significance specification is a p-value of 0.00001 if a significance is to be specified but no value is provided for p-value or q-value. :return str: Command to run.
[ "Use", "MACS2", "to", "call", "peaks", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1458-L1522
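A sketch of building a narrow-peak MACS2 call from the record above. Note that `genome` must be one of the keys in the hard-coded sizes dict (hg38, hg19, mm10, mm9, dr7) or a KeyError results; the target path follows MACS2's conventional output naming and is an assumption here (`pm`/`tk` as in the first sketch):

cmd = tk.macs2_call_peaks(
    treatment_bams="sample.filtered.bam", output_dir="peaks",
    sample_name="sample", genome="hg19",
    control_bams="input.filtered.bam", paired=True, qvalue=0.01)
pm.run(cmd, target="peaks/sample_peaks.narrowPeak")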
13,525
databio/pypiper
pypiper/ngstk.py
NGSTk.spp_call_peaks
def spp_call_peaks( self, treatment_bam, control_bam, treatment_name, control_name, output_dir, broad, cpus, qvalue=None): """ Build command for R script to call peaks with SPP. :param str treatment_bam: Path to file with data for treatment sample. :param str control_bam: Path to file with data for control sample. :param str treatment_name: Name for the treatment sample. :param str control_name: Name for the control sample. :param str output_dir: Path to folder for output. :param str | bool broad: Whether to specify broad peak calling mode. :param int cpus: Number of cores the script may use. :param float qvalue: FDR, as decimal value :return str: Command to run. """ broad = "TRUE" if broad else "FALSE" cmd = self.tools.Rscript + " `which spp_peak_calling.R` {0} {1} {2} {3} {4} {5} {6}".format( treatment_bam, control_bam, treatment_name, control_name, broad, cpus, output_dir ) if qvalue is not None: cmd += " {}".format(qvalue) return cmd
python
def spp_call_peaks( self, treatment_bam, control_bam, treatment_name, control_name, output_dir, broad, cpus, qvalue=None): broad = "TRUE" if broad else "FALSE" cmd = self.tools.Rscript + " `which spp_peak_calling.R` {0} {1} {2} {3} {4} {5} {6}".format( treatment_bam, control_bam, treatment_name, control_name, broad, cpus, output_dir ) if qvalue is not None: cmd += " {}".format(qvalue) return cmd
[ "def", "spp_call_peaks", "(", "self", ",", "treatment_bam", ",", "control_bam", ",", "treatment_name", ",", "control_name", ",", "output_dir", ",", "broad", ",", "cpus", ",", "qvalue", "=", "None", ")", ":", "broad", "=", "\"TRUE\"", "if", "broad", "else", ...
Build command for R script to call peaks with SPP. :param str treatment_bam: Path to file with data for treatment sample. :param str control_bam: Path to file with data for control sample. :param str treatment_name: Name for the treatment sample. :param str control_name: Name for the control sample. :param str output_dir: Path to folder for output. :param str | bool broad: Whether to specify broad peak calling mode. :param int cpus: Number of cores the script may use. :param float qvalue: FDR, as decimal value :return str: Command to run.
[ "Build", "command", "for", "R", "script", "to", "call", "peaks", "with", "SPP", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1537-L1559
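A companion sketch for the SPP peak caller above. The built command embeds `which spp_peak_calling.R`, so that script must be on the PATH of the shell that eventually runs it; names and the target are hypothetical (`pm`/`tk` as in the first sketch):

cmd = tk.spp_call_peaks(
    "chip.filtered.bam", "input.filtered.bam", "chip", "input",
    output_dir="peaks_spp", broad=False, cpus=8, qvalue=0.01)
pm.run(cmd, target="peaks_spp")  # target is a guess at the output location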
13,526
databio/pypiper
pypiper/ngstk.py
NGSTk.parse_duplicate_stats
def parse_duplicate_stats(self, stats_file): """ Parses sambamba markdup output, returns series with values. :param str stats_file: sambamba output file with duplicate statistics. """ import pandas as pd series = pd.Series() try: with open(stats_file) as handle: content = handle.readlines() # list of strings per line except: return series try: line = [i for i in range(len(content)) if "single ends (among them " in content[i]][0] series["single-ends"] = re.sub("\D", "", re.sub("\(.*", "", content[line])) line = [i for i in range(len(content)) if " end pairs... done in " in content[i]][0] series["paired-ends"] = re.sub("\D", "", re.sub("\.\.\..*", "", content[line])) line = [i for i in range(len(content)) if " duplicates, sorting the list... done in " in content[i]][0] series["duplicates"] = re.sub("\D", "", re.sub("\.\.\..*", "", content[line])) except IndexError: pass return series
python
def parse_duplicate_stats(self, stats_file): import pandas as pd series = pd.Series() try: with open(stats_file) as handle: content = handle.readlines() # list of strings per line except IOError: return series try: line = [i for i in range(len(content)) if "single ends (among them " in content[i]][0] series["single-ends"] = re.sub(r"\D", "", re.sub(r"\(.*", "", content[line])) line = [i for i in range(len(content)) if " end pairs... done in " in content[i]][0] series["paired-ends"] = re.sub(r"\D", "", re.sub(r"\.\.\..*", "", content[line])) line = [i for i in range(len(content)) if " duplicates, sorting the list... done in " in content[i]][0] series["duplicates"] = re.sub(r"\D", "", re.sub(r"\.\.\..*", "", content[line])) except IndexError: pass return series
[ "def", "parse_duplicate_stats", "(", "self", ",", "stats_file", ")", ":", "import", "pandas", "as", "pd", "series", "=", "pd", ".", "Series", "(", ")", "try", ":", "with", "open", "(", "stats_file", ")", "as", "handle", ":", "content", "=", "handle", "...
Parses sambamba markdup output, returns series with values. :param str stats_file: sambamba output file with duplicate statistics.
[ "Parses", "sambamba", "markdup", "output", "returns", "series", "with", "values", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1661-L1683
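A quick sketch of consuming the parser above; it degrades gracefully, returning an empty Series when the metrics file is missing and leaving keys unset when a pattern is not found. Note the values come back as digit strings, not ints (path hypothetical; `tk` as in the first sketch):

stats = tk.parse_duplicate_stats("sample.dedup_metrics.txt")
dupes = int(stats["duplicates"]) if "duplicates" in stats else None
print(stats.to_dict(), dupes)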
13,527
databio/pypiper
pypiper/ngstk.py
NGSTk.get_peak_number
def get_peak_number(self, sample): """ Counts number of peaks from a sample's peak file. :param pipelines.Sample sample: Sample object with "peaks" attribute. """ proc = subprocess.Popen(["wc", "-l", sample.peaks], stdout=subprocess.PIPE) out, err = proc.communicate() sample["peakNumber"] = re.sub("\D.*", "", out) return sample
python
def get_peak_number(self, sample): proc = subprocess.Popen(["wc", "-l", sample.peaks], stdout=subprocess.PIPE, universal_newlines=True) out, err = proc.communicate() sample["peakNumber"] = re.sub(r"\D.*", "", out.strip()) return sample
[ "def", "get_peak_number", "(", "self", ",", "sample", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"wc\"", ",", "\"-l\"", ",", "sample", ".", "peaks", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", ...
Counts number of peaks from a sample's peak file. :param pipelines.Sample sample: Sample object with "peaks" attribute.
[ "Counts", "number", "of", "peaks", "from", "a", "sample", "s", "peak", "file", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1706-L1715
13,528
databio/pypiper
pypiper/ngstk.py
NGSTk.get_frip
def get_frip(self, sample): """ Calculates the fraction of reads in peaks for a given sample. :param pipelines.Sample sample: Sample object with "peaks" attribute. """ import pandas as pd with open(sample.frip, "r") as handle: content = handle.readlines() reads_in_peaks = int(re.sub("\D", "", content[0])) mapped_reads = sample["readCount"] - sample["unaligned"] return pd.Series(reads_in_peaks / mapped_reads, index="FRiP")
python
def get_frip(self, sample): import pandas as pd with open(sample.frip, "r") as handle: content = handle.readlines() reads_in_peaks = int(re.sub(r"\D", "", content[0])) mapped_reads = sample["readCount"] - sample["unaligned"] return pd.Series(reads_in_peaks / float(mapped_reads), index=["FRiP"])
[ "def", "get_frip", "(", "self", ",", "sample", ")", ":", "import", "pandas", "as", "pd", "with", "open", "(", "sample", ".", "frip", ",", "\"r\"", ")", "as", "handle", ":", "content", "=", "handle", ".", "readlines", "(", ")", "reads_in_peaks", "=", ...
Calculates the fraction of reads in peaks for a given sample. :param pipelines.Sample sample: Sample object with "frip" attribute.
[ "Calculates", "the", "fraction", "of", "reads", "in", "peaks", "for", "a", "given", "sample", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1718-L1729
13,529
databio/pypiper
pypiper/manager.py
PipelineManager._set_status_flag
def _set_status_flag(self, status): """ Configure state and files on disk to match current processing status. :param str status: Name of new status designation for pipeline. """ # Remove previous status flag file. flag_file_path = self._flag_file_path() try: os.remove(flag_file_path) except: # Print message only if the failure to remove the status flag # is unexpected; there's no flag for initialization, so we # can't remove the file. if self.status != "initializing": print("Could not remove flag file: '{}'".format(flag_file_path)) pass # Set new status. prev_status = self.status self.status = status self._create_file(self._flag_file_path()) print("\nChanged status from {} to {}.".format( prev_status, self.status))
python
def _set_status_flag(self, status): # Remove previous status flag file. flag_file_path = self._flag_file_path() try: os.remove(flag_file_path) except OSError: # Print message only if the failure to remove the status flag # is unexpected; there's no flag for initialization, so we # can't remove the file. if self.status != "initializing": print("Could not remove flag file: '{}'".format(flag_file_path)) # Set new status. prev_status = self.status self.status = status self._create_file(self._flag_file_path()) print("\nChanged status from {} to {}.".format( prev_status, self.status))
[ "def", "_set_status_flag", "(", "self", ",", "status", ")", ":", "# Remove previous status flag file.", "flag_file_path", "=", "self", ".", "_flag_file_path", "(", ")", "try", ":", "os", ".", "remove", "(", "flag_file_path", ")", "except", ":", "# Print message on...
Configure state and files on disk to match current processing status. :param str status: Name of new status designation for pipeline.
[ "Configure", "state", "and", "files", "on", "disk", "to", "match", "current", "processing", "status", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L517-L541
13,530
databio/pypiper
pypiper/manager.py
PipelineManager._flag_file_path
def _flag_file_path(self, status=None): """ Create path to flag file based on indicated or current status. Internal variables used are the pipeline name and the designated pipeline output folder path. :param str status: flag file type to create, default to current status :return str: path to flag file of indicated or current status. """ flag_file_name = "{}_{}".format( self.name, flag_name(status or self.status)) return pipeline_filepath(self, filename=flag_file_name)
python
def _flag_file_path(self, status=None): flag_file_name = "{}_{}".format( self.name, flag_name(status or self.status)) return pipeline_filepath(self, filename=flag_file_name)
[ "def", "_flag_file_path", "(", "self", ",", "status", "=", "None", ")", ":", "flag_file_name", "=", "\"{}_{}\"", ".", "format", "(", "self", ".", "name", ",", "flag_name", "(", "status", "or", "self", ".", "status", ")", ")", "return", "pipeline_filepath",...
Create path to flag file based on indicated or current status. Internal variables used are the pipeline name and the designated pipeline output folder path. :param str status: flag file type to create, default to current status :return str: path to flag file of indicated or current status.
[ "Create", "path", "to", "flag", "file", "based", "on", "indicated", "or", "current", "status", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L544-L556
13,531
databio/pypiper
pypiper/manager.py
PipelineManager._attend_process
def _attend_process(self, proc, sleeptime): """ Waits on a process for a given time to see if it finishes, returns True if it's still running after the given time or False as soon as it returns. :param psutil.Popen proc: Process object opened by psutil.Popen() :param float sleeptime: Time to wait :return bool: True if process is still running; otherwise false """ # print("attend:{}".format(proc.pid)) try: proc.wait(timeout=sleeptime) except psutil.TimeoutExpired: return True return False
python
def _attend_process(self, proc, sleeptime): # print("attend:{}".format(proc.pid)) try: proc.wait(timeout=sleeptime) except psutil.TimeoutExpired: return True return False
[ "def", "_attend_process", "(", "self", ",", "proc", ",", "sleeptime", ")", ":", "# print(\"attend:{}\".format(proc.pid))", "try", ":", "proc", ".", "wait", "(", "timeout", "=", "sleeptime", ")", "except", "psutil", ".", "TimeoutExpired", ":", "return", "True", ...
Waits on a process for a given time to see if it finishes, returns True if it's still running after the given time or False as soon as it returns. :param psutil.Popen proc: Process object opened by psutil.Popen() :param float sleeptime: Time to wait :return bool: True if process is still running; otherwise false
[ "Waits", "on", "a", "process", "for", "a", "given", "time", "to", "see", "if", "it", "finishes", "returns", "True", "if", "it", "s", "still", "running", "after", "the", "given", "time", "or", "False", "as", "soon", "as", "it", "returns", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L810-L825
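The record above is the manager's basic poll-with-timeout primitive. The same pattern, stripped to a standalone sketch with psutil:

import psutil

proc = psutil.Popen(["sleep", "3"])  # any long-running child process
running = True
while running:
    try:
        proc.wait(timeout=0.5)  # block briefly; raises if the child is still alive
        running = False
    except psutil.TimeoutExpired:
        print("still running, pid {}".format(proc.pid))
print("exited with {}".format(proc.returncode))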
13,532
databio/pypiper
pypiper/manager.py
PipelineManager._wait_for_process
def _wait_for_process(self, p, shell=False): """ Debug function used in unit tests. :param p: A subprocess.Popen process. :param bool shell: If command requires should be run in its own shell. Optional. Default: False. """ local_maxmem = -1 sleeptime = .5 while p.poll() is None: if not shell: local_maxmem = max(local_maxmem, self._memory_usage(p.pid) / 1e6) # print("int.maxmem (pid:" + str(p.pid) + ") " + str(local_maxmem)) time.sleep(sleeptime) sleeptime = min(sleeptime + 5, 60) self.peak_memory = max(self.peak_memory, local_maxmem) del self.procs[p.pid] info = "Process " + str(p.pid) + " returned: (" + str(p.returncode) + ")." if not shell: info += " Peak memory: (Process: " + str(round(local_maxmem, 3)) + "GB;" info += " Pipeline: " + str(round(self.peak_memory, 3)) + "GB)\n" print(info + "\n") if p.returncode != 0: raise Exception("Process returned nonzero result.") return [p.returncode, local_maxmem]
python
def _wait_for_process(self, p, shell=False): local_maxmem = -1 sleeptime = .5 while p.poll() is None: if not shell: local_maxmem = max(local_maxmem, self._memory_usage(p.pid) / 1e6) # print("int.maxmem (pid:" + str(p.pid) + ") " + str(local_maxmem)) time.sleep(sleeptime) sleeptime = min(sleeptime + 5, 60) self.peak_memory = max(self.peak_memory, local_maxmem) del self.procs[p.pid] info = "Process " + str(p.pid) + " returned: (" + str(p.returncode) + ")." if not shell: info += " Peak memory: (Process: " + str(round(local_maxmem, 3)) + "GB;" info += " Pipeline: " + str(round(self.peak_memory, 3)) + "GB)\n" print(info + "\n") if p.returncode != 0: raise Exception("Process returned nonzero result.") return [p.returncode, local_maxmem]
[ "def", "_wait_for_process", "(", "self", ",", "p", ",", "shell", "=", "False", ")", ":", "local_maxmem", "=", "-", "1", "sleeptime", "=", ".5", "while", "p", ".", "poll", "(", ")", "is", "None", ":", "if", "not", "shell", ":", "local_maxmem", "=", ...
Debug function used in unit tests. :param p: A subprocess.Popen process. :param bool shell: Whether the command was run in its own shell. Optional. Default: False.
[ "Debug", "function", "used", "in", "unit", "tests", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L978-L1006
13,533
databio/pypiper
pypiper/manager.py
PipelineManager._wait_for_lock
def _wait_for_lock(self, lock_file): """ Just sleep until the lock_file does not exist or a lock_file-related dynamic recovery flag is spotted :param str lock_file: Lock file to wait upon. """ sleeptime = .5 first_message_flag = False dot_count = 0 recover_file = self._recoverfile_from_lockfile(lock_file) while os.path.isfile(lock_file): if first_message_flag is False: self.timestamp("Waiting for file lock: " + lock_file) self._set_status_flag(WAIT_FLAG) first_message_flag = True else: sys.stdout.write(".") dot_count = dot_count + 1 if dot_count % 60 == 0: print("") # linefeed # prevents the issue of pypier waiting for the lock file to be gone infinitely # in case the recovery flag is sticked by other pipeline when it's interrupted if os.path.isfile(recover_file): sys.stdout.write(" Dynamic recovery flag found") break time.sleep(sleeptime) sleeptime = min(sleeptime + 2.5, 60) if first_message_flag: self.timestamp("File unlocked.") self._set_status_flag(RUN_FLAG)
python
def _wait_for_lock(self, lock_file): sleeptime = .5 first_message_flag = False dot_count = 0 recover_file = self._recoverfile_from_lockfile(lock_file) while os.path.isfile(lock_file): if first_message_flag is False: self.timestamp("Waiting for file lock: " + lock_file) self._set_status_flag(WAIT_FLAG) first_message_flag = True else: sys.stdout.write(".") dot_count = dot_count + 1 if dot_count % 60 == 0: print("") # linefeed # Prevents pypiper from waiting forever for the lock file to disappear, # in case the recovery flag was left by another pipeline that was interrupted if os.path.isfile(recover_file): sys.stdout.write(" Dynamic recovery flag found") break time.sleep(sleeptime) sleeptime = min(sleeptime + 2.5, 60) if first_message_flag: self.timestamp("File unlocked.") self._set_status_flag(RUN_FLAG)
[ "def", "_wait_for_lock", "(", "self", ",", "lock_file", ")", ":", "sleeptime", "=", ".5", "first_message_flag", "=", "False", "dot_count", "=", "0", "recover_file", "=", "self", ".", "_recoverfile_from_lockfile", "(", "lock_file", ")", "while", "os", ".", "pat...
Just sleep until the lock_file does not exist or a lock_file-related dynamic recovery flag is spotted :param str lock_file: Lock file to wait upon.
[ "Just", "sleep", "until", "the", "lock_file", "does", "not", "exist", "or", "a", "lock_file", "-", "related", "dynamic", "recovery", "flag", "is", "spotted" ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1009-L1039
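The lock wait above is a linear-backoff spin capped at 60 seconds, with a recovery-flag escape hatch. The core pattern as a standalone sketch (file names hypothetical):

import os
import time

def wait_for_lock(lock_file, recover_file, cap=60):
    sleeptime = 0.5
    while os.path.isfile(lock_file):
        if os.path.isfile(recover_file):
            break  # another run left a dynamic-recovery flag; stop waiting
        time.sleep(sleeptime)
        sleeptime = min(sleeptime + 2.5, cap)  # back off linearly, capped

wait_for_lock("lock.align", "recover.lock.align")  # returns immediately if no lock exists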
13,534
databio/pypiper
pypiper/manager.py
PipelineManager.timestamp
def timestamp(self, message="", checkpoint=None, finished=False, raise_error=True): """ Print message, time, and time elapsed, perhaps creating checkpoint. This prints your given message, along with the current time, and time elapsed since the previous timestamp() call. If you specify a HEADING by beginning the message with "###", it surrounds the message with newlines for easier readability in the log file. If a checkpoint is designated, an empty file is created corresponding to the name given. Depending on how this manager's been configured, the value of the checkpoint, and whether this timestamp indicates initiation or completion of a group of pipeline steps, this call may stop the pipeline's execution. :param str message: Message to timestamp. :param str checkpoint: Name of checkpoint; this tends to be something that reflects the processing logic about to be or having just been completed. Provision of an argument to this parameter means that a checkpoint file will be created, facilitating arbitrary starting and stopping point for the pipeline as desired. :param bool finished: Whether this call represents the completion of a conceptual unit of a pipeline's processing :param raise_error: Whether to raise exception if checkpoint or current state indicates that a halt should occur. """ # Halt if the manager's state has been set such that this call # should halt the pipeline. if self.halt_on_next: self.halt(checkpoint, finished, raise_error=raise_error) # Determine action to take with respect to halting if needed. if checkpoint: if finished: # Write the file. self._checkpoint(checkpoint) self.prev_checkpoint = checkpoint self.curr_checkpoint = None else: self.prev_checkpoint = self.curr_checkpoint self.curr_checkpoint = checkpoint self._checkpoint(self.prev_checkpoint) # Handle the two halting conditions. if (finished and checkpoint == self.stop_after) or (not finished and checkpoint == self.stop_before): self.halt(checkpoint, finished, raise_error=raise_error) # Determine if we've started executing. elif checkpoint == self.start_point: self._active = True # If this is a prospective checkpoint, set the current checkpoint # accordingly and whether we should halt the pipeline on the # next timestamp call. if not finished and checkpoint == self.stop_after: self.halt_on_next = True elapsed = self.time_elapsed(self.last_timestamp) t = time.strftime("%m-%d %H:%M:%S") if checkpoint is None: msg = "{m} ({t}) elapsed: {delta_t} _TIME_".\ format(m=message, t=t, delta_t=elapsed) else: msg = "{m} ({t}) ({status} {stage}) elapsed: {delta_t} _TIME_".\ format(m=message, t=t, status="finished" if finished else "starting", stage=checkpoint, delta_t=elapsed) if re.match("^###", message): msg = "\n{}\n".format(msg) print(msg) self.last_timestamp = time.time()
python
def timestamp(self, message="", checkpoint=None, finished=False, raise_error=True): # Halt if the manager's state has been set such that this call # should halt the pipeline. if self.halt_on_next: self.halt(checkpoint, finished, raise_error=raise_error) # Determine action to take with respect to halting if needed. if checkpoint: if finished: # Write the file. self._checkpoint(checkpoint) self.prev_checkpoint = checkpoint self.curr_checkpoint = None else: self.prev_checkpoint = self.curr_checkpoint self.curr_checkpoint = checkpoint self._checkpoint(self.prev_checkpoint) # Handle the two halting conditions. if (finished and checkpoint == self.stop_after) or (not finished and checkpoint == self.stop_before): self.halt(checkpoint, finished, raise_error=raise_error) # Determine if we've started executing. elif checkpoint == self.start_point: self._active = True # If this is a prospective checkpoint, set the current checkpoint # accordingly and whether we should halt the pipeline on the # next timestamp call. if not finished and checkpoint == self.stop_after: self.halt_on_next = True elapsed = self.time_elapsed(self.last_timestamp) t = time.strftime("%m-%d %H:%M:%S") if checkpoint is None: msg = "{m} ({t}) elapsed: {delta_t} _TIME_".\ format(m=message, t=t, delta_t=elapsed) else: msg = "{m} ({t}) ({status} {stage}) elapsed: {delta_t} _TIME_".\ format(m=message, t=t, status="finished" if finished else "starting", stage=checkpoint, delta_t=elapsed) if re.match("^###", message): msg = "\n{}\n".format(msg) print(msg) self.last_timestamp = time.time()
[ "def", "timestamp", "(", "self", ",", "message", "=", "\"\"", ",", "checkpoint", "=", "None", ",", "finished", "=", "False", ",", "raise_error", "=", "True", ")", ":", "# Halt if the manager's state has been set such that this call", "# should halt the pipeline.", "if...
Print message, time, and time elapsed, perhaps creating checkpoint. This prints your given message, along with the current time, and time elapsed since the previous timestamp() call. If you specify a HEADING by beginning the message with "###", it surrounds the message with newlines for easier readability in the log file. If a checkpoint is designated, an empty file is created corresponding to the name given. Depending on how this manager's been configured, the value of the checkpoint, and whether this timestamp indicates initiation or completion of a group of pipeline steps, this call may stop the pipeline's execution. :param str message: Message to timestamp. :param str checkpoint: Name of checkpoint; this tends to be something that reflects the processing logic about to be or having just been completed. Provision of an argument to this parameter means that a checkpoint file will be created, facilitating arbitrary starting and stopping points for the pipeline as desired. :param bool finished: Whether this call represents the completion of a conceptual unit of a pipeline's processing :param raise_error: Whether to raise exception if checkpoint or current state indicates that a halt should occur.
[ "Print", "message", "time", "and", "time", "elapsed", "perhaps", "creating", "checkpoint", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1046-L1114
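A sketch of the checkpointing contract described above: an un-finished timestamp opens a stage, a finished one writes its checkpoint file, and the manager's stop-before/stop-after settings decide whether a halt fires. Stage names are hypothetical; `pm` as in the first sketch:

pm.timestamp("### Align reads", checkpoint="align")  # entering stage 'align'
# ... pm.run(...) alignment commands here ...
pm.timestamp("### Alignment done", checkpoint="align", finished=True)  # writes the checkpoint file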
13,535
databio/pypiper
pypiper/manager.py
PipelineManager._report_profile
def _report_profile(self, command, lock_name, elapsed_time, memory): """ Writes a string to self.pipeline_profile_file. """ message_raw = str(command) + "\t " + \ str(lock_name) + "\t" + \ str(datetime.timedelta(seconds = round(elapsed_time, 2))) + "\t " + \ str(memory) with open(self.pipeline_profile_file, "a") as myfile: myfile.write(message_raw + "\n")
python
def _report_profile(self, command, lock_name, elapsed_time, memory): message_raw = str(command) + "\t " + \ str(lock_name) + "\t" + \ str(datetime.timedelta(seconds = round(elapsed_time, 2))) + "\t " + \ str(memory) with open(self.pipeline_profile_file, "a") as myfile: myfile.write(message_raw + "\n")
[ "def", "_report_profile", "(", "self", ",", "command", ",", "lock_name", ",", "elapsed_time", ",", "memory", ")", ":", "message_raw", "=", "str", "(", "command", ")", "+", "\"\\t \"", "+", "str", "(", "lock_name", ")", "+", "\"\\t\"", "+", "str", "(", ...
Writes a string to self.pipeline_profile_file.
[ "Writes", "a", "string", "to", "self", ".", "pipeline_profile_file", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1126-L1136
13,536
databio/pypiper
pypiper/manager.py
PipelineManager.report_object
def report_object(self, key, filename, anchor_text=None, anchor_image=None, annotation=None): """ Writes a string to self.pipeline_objects_file. Used to report figures and others. :param str key: name (key) of the object :param str filename: relative path to the file (relative to parent output dir) :param str anchor_text: text used as the link anchor text or caption to refer to the object. If not provided, defaults to the key. :param str anchor_image: a path to an HTML-displayable image thumbnail (so, .png or .jpg, for example). If a path, the path should be relative to the parent output dir. :param str annotation: By default, the figures will be annotated with the pipeline name, so you can tell which pipeline records which figures. If you want, you can change this. """ # Default annotation is current pipeline name. annotation = str(annotation or self.name) # In case the value is passed with trailing whitespace. filename = str(filename).strip() if anchor_text: anchor_text = str(anchor_text).strip() else: anchor_text = str(key).strip() # better to use a relative path in this file # convert any absolute paths into relative paths relative_filename = os.path.relpath(filename, self.outfolder) \ if os.path.isabs(filename) else filename if anchor_image: relative_anchor_image = os.path.relpath(anchor_image, self.outfolder) \ if os.path.isabs(anchor_image) else anchor_image else: relative_anchor_image = "None" message_raw = "{key}\t{filename}\t{anchor_text}\t{anchor_image}\t{annotation}".format( key=key, filename=relative_filename, anchor_text=anchor_text, anchor_image=relative_anchor_image, annotation=annotation) message_markdown = "> `{key}`\t{filename}\t{anchor_text}\t{anchor_image}\t{annotation}\t_OBJ_".format( key=key, filename=relative_filename, anchor_text=anchor_text, anchor_image=relative_anchor_image,annotation=annotation) print(message_markdown) self._safe_write_to_file(self.pipeline_objects_file, message_raw)
python
def report_object(self, key, filename, anchor_text=None, anchor_image=None, annotation=None): # Default annotation is current pipeline name. annotation = str(annotation or self.name) # In case the value is passed with trailing whitespace. filename = str(filename).strip() if anchor_text: anchor_text = str(anchor_text).strip() else: anchor_text = str(key).strip() # better to use a relative path in this file # convert any absolute paths into relative paths relative_filename = os.path.relpath(filename, self.outfolder) \ if os.path.isabs(filename) else filename if anchor_image: relative_anchor_image = os.path.relpath(anchor_image, self.outfolder) \ if os.path.isabs(anchor_image) else anchor_image else: relative_anchor_image = "None" message_raw = "{key}\t{filename}\t{anchor_text}\t{anchor_image}\t{annotation}".format( key=key, filename=relative_filename, anchor_text=anchor_text, anchor_image=relative_anchor_image, annotation=annotation) message_markdown = "> `{key}`\t{filename}\t{anchor_text}\t{anchor_image}\t{annotation}\t_OBJ_".format( key=key, filename=relative_filename, anchor_text=anchor_text, anchor_image=relative_anchor_image,annotation=annotation) print(message_markdown) self._safe_write_to_file(self.pipeline_objects_file, message_raw)
[ "def", "report_object", "(", "self", ",", "key", ",", "filename", ",", "anchor_text", "=", "None", ",", "anchor_image", "=", "None", ",", "annotation", "=", "None", ")", ":", "# Default annotation is current pipeline name.", "annotation", "=", "str", "(", "annot...
Writes a string to self.pipeline_objects_file. Used to report figures and others. :param str key: name (key) of the object :param str filename: relative path to the file (relative to parent output dir) :param str anchor_text: text used as the link anchor text or caption to refer to the object. If not provided, defaults to the key. :param str anchor_image: a path to an HTML-displayable image thumbnail (so, .png or .jpg, for example). If a path, the path should be relative to the parent output dir. :param str annotation: By default, the figures will be annotated with the pipeline name, so you can tell which pipeline records which figures. If you want, you can change this.
[ "Writes", "a", "string", "to", "self", ".", "pipeline_objects_file", ".", "Used", "to", "report", "figures", "and", "others", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1171-L1219
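A sketch of reporting a figure via the record above; paths are hypothetical and, per the docstring, should be relative to the parent output directory (absolute paths are converted for you). `pm` as in the first sketch:

pm.report_object(
    "Insert size distribution",              # key
    "figures/insert_size.pdf",               # the object itself
    anchor_image="figures/insert_size.png")  # HTML-displayable thumbnail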
13,537
databio/pypiper
pypiper/manager.py
PipelineManager._create_file_racefree
def _create_file_racefree(self, file): """ Creates a file, but fails if the file already exists. This function will thus only succeed if this process actually creates the file; if the file already exists, it will cause an OSError, solving race conditions. :param str file: File to create. """ write_lock_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY os.open(file, write_lock_flags)
python
def _create_file_racefree(self, file): write_lock_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY os.close(os.open(file, write_lock_flags))
[ "def", "_create_file_racefree", "(", "self", ",", "file", ")", ":", "write_lock_flags", "=", "os", ".", "O_CREAT", "|", "os", ".", "O_EXCL", "|", "os", ".", "O_WRONLY", "os", ".", "open", "(", "file", ",", "write_lock_flags", ")" ]
Creates a file, but fails if the file already exists. This function will thus only succeed if this process actually creates the file; if the file already exists, it will cause an OSError, solving race conditions. :param str file: File to create.
[ "Creates", "a", "file", "but", "fails", "if", "the", "file", "already", "exists", ".", "This", "function", "will", "thus", "only", "succeed", "if", "this", "process", "actually", "creates", "the", "file", ";", "if", "the", "file", "already", "exists", "it"...
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1289-L1300
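The O_CREAT|O_EXCL combination above is the portable way to make file creation an atomic test-and-set. A standalone sketch of using it as a lock-acquisition primitive:

import errno
import os

def try_acquire(lock_path):
    try:
        fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return False  # another process created the file first
        raise
    os.close(fd)  # the file's existence is the lock; the descriptor is not needed
    return True

print(try_acquire("lock.demo"))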
13,538
databio/pypiper
pypiper/manager.py
PipelineManager.make_sure_path_exists
def make_sure_path_exists(self, path): """ Creates all directories in a path if it does not exist. :param str path: Path to create. :raises Exception: if the path creation attempt hits an error with a code indicating a cause other than pre-existence. """ try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise
python
def make_sure_path_exists(self, path): try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise
[ "def", "make_sure_path_exists", "(", "self", ",", "path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "OSError", "as", "exception", ":", "if", "exception", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise" ]
Creates all directories in a path if it does not exist. :param str path: Path to create. :raises Exception: if the path creation attempt hits an error with a code indicating a cause other than pre-existence.
[ "Creates", "all", "directories", "in", "a", "path", "if", "it", "does", "not", "exist", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1344-L1356
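The EEXIST guard above is the classic pre-Python-3.2 idiom for tolerating concurrent directory creation; on modern interpreters the standard library covers it directly:

import os

os.makedirs("pipeline_output/raw", exist_ok=True)  # no error if the directory already exists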
13,539
databio/pypiper
pypiper/manager.py
PipelineManager._refresh_stats
def _refresh_stats(self): """ Loads up the stats sheet created for this pipeline run and reads those stats into memory """ # regex identifies all possible stats files. #regex = self.outfolder + "*_stats.tsv" #stats_files = glob.glob(regex) #stats_files.insert(self.pipeline_stats_file) # last one is the current pipeline #for stats_file in stats_files: stats_file = self.pipeline_stats_file if os.path.isfile(self.pipeline_stats_file): with open(stats_file, 'r') as stat_file: for line in stat_file: try: # Someone may have put something that's not 3 columns in the stats file # if so, shame on him, but we can just ignore it. key, value, annotation = line.split('\t') except ValueError: print("WARNING: Each row in a stats file is expected to have 3 columns") if annotation.rstrip() == self.name or annotation.rstrip() == "shared": self.stats_dict[key] = value.strip()
python
def _refresh_stats(self): # regex identifies all possible stats files. #regex = self.outfolder + "*_stats.tsv" #stats_files = glob.glob(regex) #stats_files.insert(self.pipeline_stats_file) # last one is the current pipeline #for stats_file in stats_files: stats_file = self.pipeline_stats_file if os.path.isfile(self.pipeline_stats_file): with open(stats_file, 'r') as stat_file: for line in stat_file: try: # Someone may have put something that's not 3 columns in the stats file; # if so, just skip that line. key, value, annotation = line.split('\t') except ValueError: print("WARNING: Each row in a stats file is expected to have 3 columns") continue if annotation.rstrip() == self.name or annotation.rstrip() == "shared": self.stats_dict[key] = value.strip()
[ "def", "_refresh_stats", "(", "self", ")", ":", "# regex identifies all possible stats files.", "#regex = self.outfolder + \"*_stats.tsv\" ", "#stats_files = glob.glob(regex)", "#stats_files.insert(self.pipeline_stats_file) # last one is the current pipeline", "#for stats_file in stats_fi...
Loads up the stats sheet created for this pipeline run and reads those stats into memory
[ "Loads", "up", "the", "stats", "sheet", "created", "for", "this", "pipeline", "run", "and", "reads", "those", "stats", "into", "memory" ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1363-L1387
13,540
databio/pypiper
pypiper/manager.py
PipelineManager._checkpoint
def _checkpoint(self, stage): """ Decide whether to stop processing of a pipeline. This is the hook A pipeline can report various "checkpoints" as sort of status markers that designate the logical processing phase that's just been completed. The initiation of a pipeline can preordain one of those as a "stopping point" that when reached, should stop the pipeline's execution. :param pypiper.Stage | str stage: Pipeline processing stage/phase just completed. :return bool: Whether a checkpoint was created (i.e., whether it didn't already exist) :raise ValueError: If the stage is specified as an absolute filepath, and that path indicates a location that's not immediately within the main output folder, raise a ValueError. """ # For null stage, short-circuit and indicate no file write. # This handles case in which we're timestamping prospectively and # previously weren't in a stage. if stage is None: return False try: is_checkpoint = stage.checkpoint except AttributeError: # Maybe we have a raw function, not a stage. if hasattr(stage, "__call__"): stage = stage.__name__ else: # Maybe we have a stage name not a Stage. # In that case, we can proceed as-is, with downstream # processing handling Stage vs. stage name disambiguation. # Here, though, warn about inputs that appear filename/path-like. # We can't rely on raw text being a filepath or filename, # because that would ruin the ability to pass stage name rather # than actual stage. We can issue a warning message based on the # improbability of a stage name containing the '.' that would # be expected to characterize the extension of a file name/path. base, ext = os.path.splitext(stage) if ext and "." not in base: print("WARNING: '{}' looks like it may be the name or path of " "a file; for such a checkpoint, use touch_checkpoint.". format(stage)) else: if not is_checkpoint: print("Not a checkpoint: {}".format(stage)) return False stage = stage.name print("Checkpointing: '{}'".format(stage)) if os.path.isabs(stage): check_fpath = stage else: check_fpath = checkpoint_filepath(stage, pm=self) return self._touch_checkpoint(check_fpath)
python
def _checkpoint(self, stage): # For null stage, short-circuit and indicate no file write. # This handles case in which we're timestamping prospectively and # previously weren't in a stage. if stage is None: return False try: is_checkpoint = stage.checkpoint except AttributeError: # Maybe we have a raw function, not a stage. if hasattr(stage, "__call__"): stage = stage.__name__ else: # Maybe we have a stage name not a Stage. # In that case, we can proceed as-is, with downstream # processing handling Stage vs. stage name disambiguation. # Here, though, warn about inputs that appear filename/path-like. # We can't rely on raw text being a filepath or filename, # because that would ruin the ability to pass stage name rather # than actual stage. We can issue a warning message based on the # improbability of a stage name containing the '.' that would # be expected to characterize the extension of a file name/path. base, ext = os.path.splitext(stage) if ext and "." not in base: print("WARNING: '{}' looks like it may be the name or path of " "a file; for such a checkpoint, use touch_checkpoint.". format(stage)) else: if not is_checkpoint: print("Not a checkpoint: {}".format(stage)) return False stage = stage.name print("Checkpointing: '{}'".format(stage)) if os.path.isabs(stage): check_fpath = stage else: check_fpath = checkpoint_filepath(stage, pm=self) return self._touch_checkpoint(check_fpath)
[ "def", "_checkpoint", "(", "self", ",", "stage", ")", ":", "# For null stage, short-circuit and indicate no file write.", "# This handles case in which we're timestamping prospectively and", "# previously weren't in a stage.", "if", "stage", "is", "None", ":", "return", "False", ...
Create a checkpoint file for a pipeline processing stage. A pipeline can report various "checkpoints" as sort of status markers that designate the logical processing phase that's just been completed. The initiation of a pipeline can preordain one of those as a "stopping point" that when reached, should stop the pipeline's execution. :param pypiper.Stage | str stage: Pipeline processing stage/phase just completed. :return bool: Whether the checkpoint file already existed (i.e., whether this was an update rather than a novel creation) :raise ValueError: If the stage is specified as an absolute filepath, and that path indicates a location that's not immediately within the main output folder, raise a ValueError.
[ "Decide", "whether", "to", "stop", "processing", "of", "a", "pipeline", ".", "This", "is", "the", "hook" ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1421-L1476
13,541
databio/pypiper
pypiper/manager.py
PipelineManager._touch_checkpoint
def _touch_checkpoint(self, check_file): """ Alternative way for a pipeline to designate a checkpoint. :param str check_file: Name or path of file to use as checkpoint. :return bool: Whether a file was written (equivalent to whether the checkpoint file already existed). :raise ValueError: Raise a ValueError if the argument provided as the checkpoint file is an absolute path and that doesn't correspond to a location within the main output folder. """ if os.path.isabs(check_file): folder, _ = os.path.split(check_file) # For raw string comparison, ensure that each path # bears the final path separator. other_folder = os.path.join(folder, "") this_folder = os.path.join(self.outfolder, "") if other_folder != this_folder: errmsg = "Path provided as checkpoint file isn't in pipeline " \ "output folder. '{}' is not in '{}'".format( check_file, self.outfolder) raise ValueError(errmsg) fpath = check_file else: fpath = pipeline_filepath(self, filename=check_file) # Create/update timestamp for checkpoint, but base return value on # whether the action was a simple update or a novel creation. already_exists = os.path.isfile(fpath) open(fpath, 'w').close() action = "Updated" if already_exists else "Created" print("{} checkpoint file: '{}'".format(action, fpath)) return already_exists
python
def _touch_checkpoint(self, check_file): if os.path.isabs(check_file): folder, _ = os.path.split(check_file) # For raw string comparison, ensure that each path # bears the final path separator. other_folder = os.path.join(folder, "") this_folder = os.path.join(self.outfolder, "") if other_folder != this_folder: errmsg = "Path provided as checkpoint file isn't in pipeline " \ "output folder. '{}' is not in '{}'".format( check_file, self.outfolder) raise ValueError(errmsg) fpath = check_file else: fpath = pipeline_filepath(self, filename=check_file) # Create/update timestamp for checkpoint, but base return value on # whether the action was a simple update or a novel creation. already_exists = os.path.isfile(fpath) open(fpath, 'w').close() action = "Updated" if already_exists else "Created" print("{} checkpoint file: '{}'".format(action, fpath)) return already_exists
[ "def", "_touch_checkpoint", "(", "self", ",", "check_file", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "check_file", ")", ":", "folder", ",", "_", "=", "os", ".", "path", ".", "split", "(", "check_file", ")", "# For raw string comparison, ensure...
Alternative way for a pipeline to designate a checkpoint. :param str check_file: Name or path of file to use as checkpoint. :return bool: Whether the checkpoint file already existed (i.e., whether this was a timestamp update rather than a novel creation). :raise ValueError: Raise a ValueError if the argument provided as the checkpoint file is an absolute path and that doesn't correspond to a location within the main output folder.
[ "Alternative", "way", "for", "a", "pipeline", "to", "designate", "a", "checkpoint", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1479-L1512
13,542
databio/pypiper
pypiper/manager.py
PipelineManager.fail_pipeline
def fail_pipeline(self, e, dynamic_recover=False): """ If the pipeline does not complete, this function will stop the pipeline gracefully. It sets the status flag to failed and skips the normal success completion procedure. :param Exception e: Exception to raise. :param bool dynamic_recover: Whether to recover e.g. for job termination. """ # Take care of any active running subprocess sys.stdout.flush() self._terminate_running_subprocesses() if dynamic_recover: # job was terminated, not failed due to a bad process. # flag this run as recoverable. if len(self.locks) < 1: # If there is no process locked, then recovery will be automatic. print("No locked process. Dynamic recovery will be automatic.") # make a copy of self.locks to iterate over since we'll be clearing them as we go # set a recovery flag for each lock. for lock_file in self.locks[:]: recover_file = self._recoverfile_from_lockfile(lock_file) print("Setting dynamic recover file: {}".format(recover_file)) self._create_file(recover_file) self.locks.remove(lock_file) # Produce cleanup script self._cleanup(dry_run=True) # Finally, set the status to failed and close out with a timestamp if not self._failed: # and not self._completed: self.timestamp("### Pipeline failed at: ") total_time = datetime.timedelta(seconds=self.time_elapsed(self.starttime)) print("Total time: " + str(total_time)) self._set_status_flag(FAIL_FLAG) raise e
python
def fail_pipeline(self, e, dynamic_recover=False): # Take care of any active running subprocess sys.stdout.flush() self._terminate_running_subprocesses() if dynamic_recover: # job was terminated, not failed due to a bad process. # flag this run as recoverable. if len(self.locks) < 1: # If there is no process locked, then recovery will be automatic. print("No locked process. Dynamic recovery will be automatic.") # make a copy of self.locks to iterate over since we'll be clearing them as we go # set a recovery flag for each lock. for lock_file in self.locks[:]: recover_file = self._recoverfile_from_lockfile(lock_file) print("Setting dynamic recover file: {}".format(recover_file)) self._create_file(recover_file) self.locks.remove(lock_file) # Produce cleanup script self._cleanup(dry_run=True) # Finally, set the status to failed and close out with a timestamp if not self._failed: # and not self._completed: self.timestamp("### Pipeline failed at: ") total_time = datetime.timedelta(seconds=self.time_elapsed(self.starttime)) print("Total time: " + str(total_time)) self._set_status_flag(FAIL_FLAG) raise e
[ "def", "fail_pipeline", "(", "self", ",", "e", ",", "dynamic_recover", "=", "False", ")", ":", "# Take care of any active running subprocess", "sys", ".", "stdout", ".", "flush", "(", ")", "self", ".", "_terminate_running_subprocesses", "(", ")", "if", "dynamic_re...
If the pipeline does not complete, this function will stop the pipeline gracefully. It sets the status flag to failed and skips the normal success completion procedure. :param Exception e: Exception to raise. :param bool dynamic_recover: Whether to recover e.g. for job termination.
[ "If", "the", "pipeline", "does", "not", "complete", "this", "function", "will", "stop", "the", "pipeline", "gracefully", ".", "It", "sets", "the", "status", "flag", "to", "failed", "and", "skips", "the", "normal", "success", "completion", "procedure", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1520-L1556
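The dynamic-recovery branch above flags each held lock so a later run can resume. A minimal standalone sketch of that loop, with a hypothetical recover-file naming scheme standing in for _recoverfile_from_lockfile:

locks = ["lock.align_reads", "lock.call_peaks"]   # hypothetical lock files

def recoverfile_from_lockfile(lock_file):
    # Hypothetical naming scheme; the real mapping is internal to pypiper.
    return lock_file.replace("lock.", "recover.lock.")

# Iterate over a copy, since entries are removed as they are flagged.
for lock_file in locks[:]:
    recover_file = recoverfile_from_lockfile(lock_file)
    open(recover_file, "w").close()   # touch the recovery flag
    locks.remove(lock_file)

print(locks)   # [] -- every lock was flagged as recoverable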
13,543
databio/pypiper
pypiper/manager.py
PipelineManager.halt
def halt(self, checkpoint=None, finished=False, raise_error=True):
    """
    Stop the pipeline before completion point.

    :param str checkpoint: Name of stage just reached or just completed.
    :param bool finished: Whether the indicated stage was just finished
        (True), or just reached (False)
    :param bool raise_error: Whether to raise an exception to truly
        halt execution.
    """
    self.stop_pipeline(PAUSE_FLAG)
    self._active = False
    if raise_error:
        raise PipelineHalt(checkpoint, finished)
python
def halt(self, checkpoint=None, finished=False, raise_error=True):
    self.stop_pipeline(PAUSE_FLAG)
    self._active = False
    if raise_error:
        raise PipelineHalt(checkpoint, finished)
[ "def", "halt", "(", "self", ",", "checkpoint", "=", "None", ",", "finished", "=", "False", ",", "raise_error", "=", "True", ")", ":", "self", ".", "stop_pipeline", "(", "PAUSE_FLAG", ")", "self", ".", "_active", "=", "False", "if", "raise_error", ":", ...
Stop the pipeline before completion point. :param str checkpoint: Name of stage just reached or just completed. :param bool finished: Whether the indicated stage was just finished (True), or just reached (False) :param bool raise_error: Whether to raise an exception to truly halt execution.
[ "Stop", "the", "pipeline", "before", "completion", "point", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1559-L1572
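A minimal driver sketch of how halt behaves in a pipeline script; the pipeline name and output folder are hypothetical, and by default the PipelineHalt exception stops execution at the call site:

import pypiper

pm = pypiper.PipelineManager(name="demo", outfolder="demo_out")
pm.timestamp("First stage")
# ... run some commands ...

# Sets the PAUSE flag, marks the manager inactive, and (since
# raise_error defaults to True) raises PipelineHalt here.
pm.halt(checkpoint="first_stage", finished=True)

pm.timestamp("Never reached")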
13,544
databio/pypiper
pypiper/manager.py
PipelineManager.stop_pipeline
def stop_pipeline(self, status=COMPLETE_FLAG):
    """
    Terminate the pipeline.

    This is the "healthy" pipeline completion function, to be run by the
    pipeline at the end of the script. It sets the status flag to completed
    and records some time and memory statistics to the log file.
    """
    self._set_status_flag(status)
    self._cleanup()
    self.report_result("Time", str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
    self.report_result("Success", time.strftime("%m-%d-%H:%M:%S"))
    print("\n##### [Epilogue:]")
    print("* " + "Total elapsed time".rjust(20) + ": " + str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
    # print("Peak memory used: " + str(memory_usage()["peak"]) + "kb")
    print("* " + "Peak memory used".rjust(20) + ": " + str(round(self.peak_memory, 2)) + " GB")
    if self.halted:
        return
    self.timestamp("* Pipeline completed at: ".rjust(20))
python
def stop_pipeline(self, status=COMPLETE_FLAG):
    self._set_status_flag(status)
    self._cleanup()
    self.report_result("Time", str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
    self.report_result("Success", time.strftime("%m-%d-%H:%M:%S"))
    print("\n##### [Epilogue:]")
    print("* " + "Total elapsed time".rjust(20) + ": " + str(datetime.timedelta(seconds=self.time_elapsed(self.starttime))))
    # print("Peak memory used: " + str(memory_usage()["peak"]) + "kb")
    print("* " + "Peak memory used".rjust(20) + ": " + str(round(self.peak_memory, 2)) + " GB")
    if self.halted:
        return
    self.timestamp("* Pipeline completed at: ".rjust(20))
[ "def", "stop_pipeline", "(", "self", ",", "status", "=", "COMPLETE_FLAG", ")", ":", "self", ".", "_set_status_flag", "(", "status", ")", "self", ".", "_cleanup", "(", ")", "self", ".", "report_result", "(", "\"Time\"", ",", "str", "(", "datetime", ".", "...
Terminate the pipeline. This is the "healthy" pipeline completion function, to be run by the pipeline at the end of the script. It sets the status flag to completed and records some time and memory statistics to the log file.
[ "Terminate", "the", "pipeline", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1575-L1594
13,545
databio/pypiper
pypiper/manager.py
PipelineManager._generic_signal_handler
def _generic_signal_handler(self, signal_type):
    """
    Function for handling both SIGTERM and SIGINT
    """
    print("</pre>")
    message = "Got " + signal_type + ". Failing gracefully..."
    self.timestamp(message)
    self.fail_pipeline(KeyboardInterrupt(signal_type), dynamic_recover=True)
    sys.exit(1)
python
def _generic_signal_handler(self, signal_type):
    print("</pre>")
    message = "Got " + signal_type + ". Failing gracefully..."
    self.timestamp(message)
    self.fail_pipeline(KeyboardInterrupt(signal_type), dynamic_recover=True)
    sys.exit(1)
[ "def", "_generic_signal_handler", "(", "self", ",", "signal_type", ")", ":", "print", "(", "\"</pre>\"", ")", "message", "=", "\"Got \"", "+", "signal_type", "+", "\". Failing gracefully...\"", "self", ".", "timestamp", "(", "message", ")", "self", ".", "fail_pi...
Function for handling both SIGTERM and SIGINT
[ "Function", "for", "handling", "both", "SIGTERM", "and", "SIGINT" ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1609-L1617
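One handler serves both signals; the registration is not shown in this record, but wiring it up looks roughly like the sketch below (a standalone stand-in for the method, with the signal name passed through a lambda):

import signal
import sys

def generic_signal_handler(signal_type):
    # Standalone stand-in for the method above: log, then bail out.
    print("Got " + signal_type + ". Failing gracefully...")
    sys.exit(1)

signal.signal(signal.SIGTERM, lambda sig, frame: generic_signal_handler("SIGTERM"))
signal.signal(signal.SIGINT, lambda sig, frame: generic_signal_handler("SIGINT"))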
13,546
databio/pypiper
pypiper/manager.py
PipelineManager._exit_handler
def _exit_handler(self):
    """
    This function is registered with atexit to run whenever the script is completing.
    A catch-all for uncaught exceptions, setting status flag file to failed.
    """
    # TODO: consider handling sys.stderr/sys.stdout exceptions related to
    # TODO (cont.): order of interpreter vs. subprocess shutdown signal receipt.
    # TODO (cont.): see https://bugs.python.org/issue11380

    # Make the cleanup file executable if it exists
    if os.path.isfile(self.cleanup_file):
        # Make the cleanup file self destruct.
        with open(self.cleanup_file, "a") as myfile:
            myfile.write("rm " + self.cleanup_file + "\n")
        os.chmod(self.cleanup_file, 0o755)

    # If the pipeline hasn't completed successfully, or already been marked
    # as failed, then mark it as failed now.
    if not self._has_exit_status:
        print("Pipeline status: {}".format(self.status))
        self.fail_pipeline(Exception("Pipeline failure. See details above."))

    if self.tee:
        self.tee.kill()
python
def _exit_handler(self):
    # TODO: consider handling sys.stderr/sys.stdout exceptions related to
    # TODO (cont.): order of interpreter vs. subprocess shutdown signal receipt.
    # TODO (cont.): see https://bugs.python.org/issue11380

    # Make the cleanup file executable if it exists
    if os.path.isfile(self.cleanup_file):
        # Make the cleanup file self destruct.
        with open(self.cleanup_file, "a") as myfile:
            myfile.write("rm " + self.cleanup_file + "\n")
        os.chmod(self.cleanup_file, 0o755)

    # If the pipeline hasn't completed successfully, or already been marked
    # as failed, then mark it as failed now.
    if not self._has_exit_status:
        print("Pipeline status: {}".format(self.status))
        self.fail_pipeline(Exception("Pipeline failure. See details above."))

    if self.tee:
        self.tee.kill()
[ "def", "_exit_handler", "(", "self", ")", ":", "# TODO: consider handling sys.stderr/sys.stdout exceptions related to", "# TODO (cont.): order of interpreter vs. subprocess shutdown signal receipt.", "# TODO (cont.): see https://bugs.python.org/issue11380", "# Make the cleanup file executable if i...
This function is registered with atexit to run whenever the script is completing. A catch-all for uncaught exceptions, setting status flag file to failed.
[ "This", "function", "I", "register", "with", "atexit", "to", "run", "whenever", "the", "script", "is", "completing", ".", "A", "catch", "-", "all", "for", "uncaught", "exceptions", "setting", "status", "flag", "file", "to", "failed", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1638-L1664
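The self-destructing cleanup file is the subtle part of _exit_handler: the handler appends a command that deletes the cleanup script itself. A reduced standalone sketch, registered the same way with atexit (the cleanup file name is hypothetical):

import atexit
import os

def exit_handler():
    cleanup_file = "cleanup.sh"   # hypothetical name
    if os.path.isfile(cleanup_file):
        with open(cleanup_file, "a") as f:
            f.write("rm " + cleanup_file + "\n")   # make it self-destruct
        os.chmod(cleanup_file, 0o755)

# Runs however the interpreter exits: normal return, sys.exit, or an
# uncaught exception.
atexit.register(exit_handler)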
13,547
databio/pypiper
pypiper/manager.py
PipelineManager._kill_child_process
def _kill_child_process(self, child_pid, proc_name=None):
    """
    Pypiper spawns subprocesses. We need to kill them to exit gracefully,
    in the event of a pipeline termination or interrupt signal.
    By default, child processes are not automatically killed when python
    terminates, so Pypiper must clean these up manually.
    Given a process ID, this function just kills it.

    :param int child_pid: Child process id.
    """
    # When we kill process, it turns into a zombie, and we have to reap it.
    # So we can't just kill it and then let it go; we call wait
    def pskill(proc_pid, sig=signal.SIGINT):
        parent_process = psutil.Process(proc_pid)
        for child_proc in parent_process.children(recursive=True):
            child_proc.send_signal(sig)
        parent_process.send_signal(sig)

    if child_pid is None:
        return

    # Default the label so it is defined even when no process name is given.
    proc_string = ""
    if proc_name:
        proc_string = " ({proc_name})".format(proc_name=proc_name)

    # First a gentle kill
    sys.stdout.flush()
    still_running = self._attend_process(psutil.Process(child_pid), 0)
    sleeptime = .25
    time_waiting = 0

    while still_running and time_waiting < 3:
        try:
            if time_waiting > 2:
                pskill(child_pid, signal.SIGKILL)
                # print("pskill("+str(child_pid)+", signal.SIGKILL)")
            elif time_waiting > 1:
                pskill(child_pid, signal.SIGTERM)
                # print("pskill("+str(child_pid)+", signal.SIGTERM)")
            else:
                pskill(child_pid, signal.SIGINT)
                # print("pskill("+str(child_pid)+", signal.SIGINT)")
        except OSError:
            # This would happen if the child process ended between the check
            # and the next kill step
            still_running = False
            time_waiting = time_waiting + sleeptime

        # Now see if it's still running
        time_waiting = time_waiting + sleeptime
        if not self._attend_process(psutil.Process(child_pid), sleeptime):
            still_running = False

    if still_running:
        # still running!?
        print("Child process {child_pid}{proc_string} never responded. "
              "I just can't take it anymore. I don't know what to do...".format(
            child_pid=child_pid, proc_string=proc_string))
    else:
        if time_waiting > 0:
            note = "terminated after {time} sec".format(time=int(time_waiting))
        else:
            note = "was already terminated"
        msg = "Child process {child_pid}{proc_string} {note}.".format(
            child_pid=child_pid, proc_string=proc_string, note=note)
        print(msg)
python
def _kill_child_process(self, child_pid, proc_name=None):
    # When we kill process, it turns into a zombie, and we have to reap it.
    # So we can't just kill it and then let it go; we call wait
    def pskill(proc_pid, sig=signal.SIGINT):
        parent_process = psutil.Process(proc_pid)
        for child_proc in parent_process.children(recursive=True):
            child_proc.send_signal(sig)
        parent_process.send_signal(sig)

    if child_pid is None:
        return

    # Default the label so it is defined even when no process name is given.
    proc_string = ""
    if proc_name:
        proc_string = " ({proc_name})".format(proc_name=proc_name)

    # First a gentle kill
    sys.stdout.flush()
    still_running = self._attend_process(psutil.Process(child_pid), 0)
    sleeptime = .25
    time_waiting = 0

    while still_running and time_waiting < 3:
        try:
            if time_waiting > 2:
                pskill(child_pid, signal.SIGKILL)
                # print("pskill("+str(child_pid)+", signal.SIGKILL)")
            elif time_waiting > 1:
                pskill(child_pid, signal.SIGTERM)
                # print("pskill("+str(child_pid)+", signal.SIGTERM)")
            else:
                pskill(child_pid, signal.SIGINT)
                # print("pskill("+str(child_pid)+", signal.SIGINT)")
        except OSError:
            # This would happen if the child process ended between the check
            # and the next kill step
            still_running = False
            time_waiting = time_waiting + sleeptime

        # Now see if it's still running
        time_waiting = time_waiting + sleeptime
        if not self._attend_process(psutil.Process(child_pid), sleeptime):
            still_running = False

    if still_running:
        # still running!?
        print("Child process {child_pid}{proc_string} never responded. "
              "I just can't take it anymore. I don't know what to do...".format(
            child_pid=child_pid, proc_string=proc_string))
    else:
        if time_waiting > 0:
            note = "terminated after {time} sec".format(time=int(time_waiting))
        else:
            note = "was already terminated"
        msg = "Child process {child_pid}{proc_string} {note}.".format(
            child_pid=child_pid, proc_string=proc_string, note=note)
        print(msg)
[ "def", "_kill_child_process", "(", "self", ",", "child_pid", ",", "proc_name", "=", "None", ")", ":", "# When we kill process, it turns into a zombie, and we have to reap it.", "# So we can't just kill it and then let it go; we call wait", "def", "pskill", "(", "proc_pid", ",", ...
Pypiper spawns subprocesses. We need to kill them to exit gracefully, in the event of a pipeline termination or interrupt signal. By default, child processes are not automatically killed when python terminates, so Pypiper must clean these up manually. Given a process ID, this function just kills it. :param int child_pid: Child process id.
[ "Pypiper", "spawns", "subprocesses", ".", "We", "need", "to", "kill", "them", "to", "exit", "gracefully", "in", "the", "event", "of", "a", "pipeline", "termination", "or", "interrupt", "signal", ".", "By", "default", "child", "processes", "are", "not", "auto...
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1681-L1751
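The children-first signalling and the zombie-reaping concern from the comments can be seen end to end in a small sketch using a throwaway sleep process:

import signal
import subprocess

import psutil

proc = subprocess.Popen(["sleep", "60"])
# Signal every member of the process tree, children before the parent,
# exactly as pskill() does above.
parent = psutil.Process(proc.pid)
for child in parent.children(recursive=True):
    child.send_signal(signal.SIGINT)
parent.send_signal(signal.SIGINT)
proc.wait()   # reap the child so no zombie is left behind
print("exited with", proc.returncode)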
13,548
databio/pypiper
pypiper/manager.py
PipelineManager._memory_usage
def _memory_usage(self, pid='self', category="hwm", container=None):
    """
    Memory usage of the process in kilobytes.

    :param str pid: Process ID of process to check
    :param str category: Memory type to check. 'hwm' for high water mark.
    :param str container: Optional Docker container to query via
        `docker stats` instead of reading /proc.
    """
    if container:
        # TODO: Put some debug output here with switch to Logger
        # since this is relatively untested.
        cmd = "docker stats " + container + " --format '{{.MemUsage}}' --no-stream"
        # universal_newlines yields a text result, so the string
        # operations below also work on Python 3.
        mem_use_str = subprocess.check_output(cmd, shell=True, universal_newlines=True)
        mem_num = re.findall(r'[\d\.]+', mem_use_str.split("/")[0])[0]
        mem_scale = re.findall(r'[A-Za-z]+', mem_use_str.split("/")[0])[0]
        # print(mem_use_str, mem_num, mem_scale)
        mem_num = float(mem_num)
        if mem_scale == "GiB":
            return mem_num * 1e6
        elif mem_scale == "MiB":
            return mem_num * 1e3
        elif mem_scale == "KiB":
            return mem_num
        else:
            # What type is this?
            return 0

    # Thanks Martin Geisler:
    status = None
    result = {'peak': 0, 'rss': 0, 'hwm': 0}
    try:
        # This will only work on systems with a /proc file system
        # (like Linux).
        # status = open('/proc/self/status')
        proc_spot = '/proc/%s/status' % pid
        status = open(proc_spot)
        for line in status:
            parts = line.split()
            key = parts[0][2:-1].lower()
            if key in result:
                result[key] = int(parts[1])
    except Exception:
        return 0
    finally:
        if status is not None:
            status.close()
    # print(result[category])
    return result[category]
python
def _memory_usage(self, pid='self', category="hwm", container=None):
    if container:
        # TODO: Put some debug output here with switch to Logger
        # since this is relatively untested.
        cmd = "docker stats " + container + " --format '{{.MemUsage}}' --no-stream"
        # universal_newlines yields a text result, so the string
        # operations below also work on Python 3.
        mem_use_str = subprocess.check_output(cmd, shell=True, universal_newlines=True)
        mem_num = re.findall(r'[\d\.]+', mem_use_str.split("/")[0])[0]
        mem_scale = re.findall(r'[A-Za-z]+', mem_use_str.split("/")[0])[0]
        # print(mem_use_str, mem_num, mem_scale)
        mem_num = float(mem_num)
        if mem_scale == "GiB":
            return mem_num * 1e6
        elif mem_scale == "MiB":
            return mem_num * 1e3
        elif mem_scale == "KiB":
            return mem_num
        else:
            # What type is this?
            return 0

    # Thanks Martin Geisler:
    status = None
    result = {'peak': 0, 'rss': 0, 'hwm': 0}
    try:
        # This will only work on systems with a /proc file system
        # (like Linux).
        # status = open('/proc/self/status')
        proc_spot = '/proc/%s/status' % pid
        status = open(proc_spot)
        for line in status:
            parts = line.split()
            key = parts[0][2:-1].lower()
            if key in result:
                result[key] = int(parts[1])
    except Exception:
        return 0
    finally:
        if status is not None:
            status.close()
    # print(result[category])
    return result[category]
[ "def", "_memory_usage", "(", "self", ",", "pid", "=", "'self'", ",", "category", "=", "\"hwm\"", ",", "container", "=", "None", ")", ":", "if", "container", ":", "# TODO: Put some debug output here with switch to Logger", "# since this is relatively untested.", "cmd", ...
Memory usage of the process in kilobytes. :param str pid: Process ID of process to check :param str category: Memory type to check. 'hwm' for high water mark. :param str container: Optional Docker container to query via `docker stats` instead of reading /proc.
[ "Memory", "usage", "of", "the", "process", "in", "kilobytes", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1914-L1965
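The /proc branch is simple enough to run on its own; this standalone sketch reads /proc/self/status and pulls the VmPeak/VmRSS/VmHWM lines, which the kernel reports in kB (Linux-only, like the original):

result = {'peak': 0, 'rss': 0, 'hwm': 0}
try:
    with open('/proc/self/status') as status:
        for line in status:
            parts = line.split()
            # "VmHWM:" -> drop "Vm" and the trailing ":" -> "hwm"
            key = parts[0][2:-1].lower()
            if key in result:
                result[key] = int(parts[1])
except (IOError, OSError):
    pass   # not on a system with /proc
print(result)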
13,549
databio/pypiper
pypiper/manager.py
PipelineManager._triage_error
def _triage_error(self, e, nofail):
    """ Print a message and decide what to do about an error. """
    if not nofail:
        self.fail_pipeline(e)
    elif self._failed:
        print("This is a nofail process, but the pipeline was terminated for other reasons, so we fail.")
        raise e
    else:
        print(e)
        print("ERROR: Subprocess returned nonzero result, but pipeline is continuing because nofail=True")
python
def _triage_error(self, e, nofail):
    if not nofail:
        self.fail_pipeline(e)
    elif self._failed:
        print("This is a nofail process, but the pipeline was terminated for other reasons, so we fail.")
        raise e
    else:
        print(e)
        print("ERROR: Subprocess returned nonzero result, but pipeline is continuing because nofail=True")
[ "def", "_triage_error", "(", "self", ",", "e", ",", "nofail", ")", ":", "if", "not", "nofail", ":", "self", ".", "fail_pipeline", "(", "e", ")", "elif", "self", ".", "_failed", ":", "print", "(", "\"This is a nofail process, but the pipeline was terminated for o...
Print a message and decide what to do about an error.
[ "Print", "a", "message", "and", "decide", "what", "to", "do", "about", "an", "error", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1967-L1976
13,550
databio/pypiper
setup.py
read_reqs_file
def read_reqs_file(reqs_name):
    """ Read requirements file for given requirements group. """
    path_reqs_file = os.path.join(
        "requirements", "reqs-{}.txt".format(reqs_name))
    with open(path_reqs_file, 'r') as reqs_file:
        return [pkg.rstrip() for pkg in reqs_file.readlines()
                if not pkg.startswith("#")]
python
def read_reqs_file(reqs_name):
    path_reqs_file = os.path.join(
        "requirements", "reqs-{}.txt".format(reqs_name))
    with open(path_reqs_file, 'r') as reqs_file:
        return [pkg.rstrip() for pkg in reqs_file.readlines()
                if not pkg.startswith("#")]
[ "def", "read_reqs_file", "(", "reqs_name", ")", ":", "path_reqs_file", "=", "os", ".", "path", ".", "join", "(", "\"requirements\"", ",", "\"reqs-{}.txt\"", ".", "format", "(", "reqs_name", ")", ")", "with", "open", "(", "path_reqs_file", ",", "'r'", ")", ...
Read requirements file for given requirements group.
[ "Read", "requirements", "file", "for", "given", "requirements", "group", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/setup.py#L20-L26
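A hypothetical usage inside the same setup.py, assuming requirements/reqs-default.txt and requirements/reqs-test.txt exist in the layout the function expects and that read_reqs_file is defined above the setup() call:

from setuptools import setup

setup(
    name="example-package",   # hypothetical package name
    version="0.1.0",
    install_requires=read_reqs_file("default"),
    extras_require={"test": read_reqs_file("test")},
)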
13,551
databio/pypiper
pypiper/pipeline.py
_is_unordered
def _is_unordered(collection):
    """
    Determine whether a collection appears to be unordered.

    This is a conservative implementation, allowing for the possibility
    that someone's implemented Mapping or Set, for example, and provided
    an __iter__ implementation that defines a consistent ordering of the
    collection's elements.

    :param object collection: Object to check as an unordered collection.
    :return bool: Whether the given object appears to be unordered
    :raises TypeError: If the given "collection" is non-iterable, it's
        illogical to investigate whether it's ordered.
    """
    if not isinstance(collection, Iterable):
        raise TypeError("Non-iterable alleged collection: {}".
                        format(type(collection)))
    return isinstance(collection, set) or \
           (isinstance(collection, dict) and
            not isinstance(collection, OrderedDict))
python
def _is_unordered(collection):
    if not isinstance(collection, Iterable):
        raise TypeError("Non-iterable alleged collection: {}".
                        format(type(collection)))
    return isinstance(collection, set) or \
           (isinstance(collection, dict) and
            not isinstance(collection, OrderedDict))
[ "def", "_is_unordered", "(", "collection", ")", ":", "if", "not", "isinstance", "(", "collection", ",", "Iterable", ")", ":", "raise", "TypeError", "(", "\"Non-iterable alleged collection: {}\"", ".", "format", "(", "type", "(", "collection", ")", ")", ")", "r...
Determine whether a collection appears to be unordered. This is a conservative implementation, allowing for the possibility that someone's implemented Mapping or Set, for example, and provided an __iter__ implementation that defines a consistent ordering of the collection's elements. :param object collection: Object to check as an unordered collection. :return bool: Whether the given object appears to be unordered :raises TypeError: If the given "collection" is non-iterable, it's illogical to investigate whether it's ordered.
[ "Determine", "whether", "a", "collection", "appears", "to", "be", "unordered", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/pipeline.py#L382-L401
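Assuming _is_unordered and its imports are available, the decision table it encodes looks like this; note that a plain dict still counts as unordered here even though CPython 3.7+ preserves insertion order, which is exactly the conservatism the docstring describes:

from collections import OrderedDict

print(_is_unordered({"a", "b"}))         # True  (set)
print(_is_unordered({"a": 1}))           # True  (plain dict)
print(_is_unordered(OrderedDict(a=1)))   # False (explicitly ordered mapping)
print(_is_unordered(["a", "b"]))         # False (sequence)
_is_unordered(42)                        # raises TypeError (not iterable)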
13,552
databio/pypiper
pypiper/pipeline.py
_parse_stage_spec
def _parse_stage_spec(stage_spec):
    """
    Handle alternate Stage specifications, returning name and Stage.

    Isolate this parsing logic from any iteration. TypeError as single
    exception type funnel also provides a more uniform way for callers to
    handle specification errors (e.g., skip a stage, warn, re-raise, etc.)

    :param (str, pypiper.Stage) | callable stage_spec: name and Stage
    :return (name, pypiper.Stage): Pair of name and Stage instance from
        parsing input specification
    :raise TypeError: if the specification of the stage is not a supported type
    """

    # The message used in the checks below, telling a user how to specify a Stage.
    req_msg = "Stage specification must be either a {0} itself, a " \
              "(<name>, {0}) pair, or a callable with a __name__ attribute " \
              "(e.g., a non-anonymous function)".format(Stage.__name__)

    # Simplest case is stage itself.
    if isinstance(stage_spec, Stage):
        return stage_spec.name, stage_spec

    # Handle alternate forms of specification.
    try:
        # Unpack pair of name and stage, requiring name first.
        name, stage = stage_spec
    except (TypeError, ValueError):
        # Normally, this sort of unpacking issue creates a ValueError. Here,
        # though, we also need to catch TypeError since that's what arises
        # if an attempt is made to unpack a single function.
        # Attempt to parse stage_spec as a single named callable.
        try:
            name = stage_spec.__name__
        except AttributeError:
            raise TypeError(req_msg)
        else:
            # Control flow here indicates an anonymous function that was not
            # paired with a name. Prohibit that.
            if name == (lambda: None).__name__:
                raise TypeError(req_msg)
        stage = stage_spec

    # Ensure that the stage is callable.
    if not hasattr(stage, "__call__"):
        raise TypeError(req_msg)

    return name, Stage(stage, name=name)
python
def _parse_stage_spec(stage_spec):
    # The message used in the checks below, telling a user how to specify a Stage.
    req_msg = "Stage specification must be either a {0} itself, a " \
              "(<name>, {0}) pair, or a callable with a __name__ attribute " \
              "(e.g., a non-anonymous function)".format(Stage.__name__)

    # Simplest case is stage itself.
    if isinstance(stage_spec, Stage):
        return stage_spec.name, stage_spec

    # Handle alternate forms of specification.
    try:
        # Unpack pair of name and stage, requiring name first.
        name, stage = stage_spec
    except (TypeError, ValueError):
        # Normally, this sort of unpacking issue creates a ValueError. Here,
        # though, we also need to catch TypeError since that's what arises
        # if an attempt is made to unpack a single function.
        # Attempt to parse stage_spec as a single named callable.
        try:
            name = stage_spec.__name__
        except AttributeError:
            raise TypeError(req_msg)
        else:
            # Control flow here indicates an anonymous function that was not
            # paired with a name. Prohibit that.
            if name == (lambda: None).__name__:
                raise TypeError(req_msg)
        stage = stage_spec

    # Ensure that the stage is callable.
    if not hasattr(stage, "__call__"):
        raise TypeError(req_msg)

    return name, Stage(stage, name=name)
[ "def", "_parse_stage_spec", "(", "stage_spec", ")", ":", "# The logic used here, a message to a user about how to specify Stage.", "req_msg", "=", "\"Stage specification must be either a {0} itself, a \"", "\"(<name>, {0}) pair, or a callable with a __name__ attribute \"", "\"(e.g., a non-anon...
Handle alternate Stage specifications, returning name and Stage. Isolate this parsing logic from any iteration. TypeError as single exception type funnel also provides a more uniform way for callers to handle specification errors (e.g., skip a stage, warn, re-raise, etc.) :param (str, pypiper.Stage) | callable stage_spec: name and Stage :return (name, pypiper.Stage): Pair of name and Stage instance from parsing input specification :raise TypeError: if the specification of the stage is not a supported type
[ "Handle", "alternate", "Stage", "specifications", "returning", "name", "and", "Stage", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/pipeline.py#L405-L452
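Assuming Stage and _parse_stage_spec are importable from pypiper's pipeline module, the three accepted forms from the error message, plus the rejected anonymous case:

def align_reads():
    pass

name, stage = _parse_stage_spec(Stage(align_reads))       # a Stage itself
name, stage = _parse_stage_spec(("align", align_reads))   # (<name>, callable) pair
name, stage = _parse_stage_spec(align_reads)              # named callable

_parse_stage_spec(lambda: None)   # raises TypeError: anonymous, unusable name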
13,553
databio/pypiper
pypiper/pipeline.py
Pipeline.checkpoint
def checkpoint(self, stage, msg=""):
    """
    Touch checkpoint file for given stage and provide timestamp message.

    :param pypiper.Stage stage: Stage for which to mark checkpoint
    :param str msg: Message to embed in timestamp.
    :return bool: Whether a checkpoint file was written.
    """
    # Canonical usage model for Pipeline checkpointing through
    # implementations of this class is by automatically creating a
    # checkpoint when a conceptual unit or group of operations of a
    # pipeline completes, so fix the 'finished' parameter to the manager's
    # timestamp method to be True.
    return self.manager.timestamp(
        message=msg, checkpoint=stage.checkpoint_name, finished=True)
python
def checkpoint(self, stage, msg=""):
    # Canonical usage model for Pipeline checkpointing through
    # implementations of this class is by automatically creating a
    # checkpoint when a conceptual unit or group of operations of a
    # pipeline completes, so fix the 'finished' parameter to the manager's
    # timestamp method to be True.
    return self.manager.timestamp(
        message=msg, checkpoint=stage.checkpoint_name, finished=True)
[ "def", "checkpoint", "(", "self", ",", "stage", ",", "msg", "=", "\"\"", ")", ":", "# Canonical usage model for Pipeline checkpointing through", "# implementations of this class is by automatically creating a", "# checkpoint when a conceptual unit or group of operations of a", "# pipel...
Touch checkpoint file for given stage and provide timestamp message. :param pypiper.Stage stage: Stage for which to mark checkpoint :param str msg: Message to embed in timestamp. :return bool: Whether a checkpoint file was written.
[ "Touch", "checkpoint", "file", "for", "given", "stage", "and", "provide", "timestamp", "message", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/pipeline.py#L181-L195
13,554
databio/pypiper
pypiper/pipeline.py
Pipeline.completed_stage
def completed_stage(self, stage):
    """
    Determine whether the pipeline's completed the stage indicated.

    :param pypiper.Stage stage: Stage to check for completion status.
    :return bool: Whether this pipeline's completed the indicated stage.
    :raises UnknownStageException: If the stage name given is undefined
        for the pipeline, a ValueError arises.
    """
    check_path = checkpoint_filepath(stage, self.manager)
    return os.path.exists(check_path)
python
def completed_stage(self, stage):
    check_path = checkpoint_filepath(stage, self.manager)
    return os.path.exists(check_path)
[ "def", "completed_stage", "(", "self", ",", "stage", ")", ":", "check_path", "=", "checkpoint_filepath", "(", "stage", ",", "self", ".", "manager", ")", "return", "os", ".", "path", ".", "exists", "(", "check_path", ")" ]
Determine whether the pipeline's completed the stage indicated. :param pypiper.Stage stage: Stage to check for completion status. :return bool: Whether this pipeline's completed the indicated stage. :raises UnknownStageException: If the stage name given is undefined for the pipeline, a ValueError arises.
[ "Determine", "whether", "the", "pipeline", "s", "completed", "the", "stage", "indicated", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/pipeline.py#L198-L208
13,555
databio/pypiper
pypiper/pipeline.py
Pipeline.list_flags
def list_flags(self, only_name=False):
    """
    Determine the flag files associated with this pipeline.

    :param bool only_name: Whether to return only flag file name(s) (True),
        or full flag file paths (False); default False (paths)
    :return list[str]: flag files associated with this pipeline.
    """
    paths = glob.glob(os.path.join(self.outfolder, flag_name("*")))
    if only_name:
        return [os.path.split(p)[1] for p in paths]
    else:
        return paths
python
def list_flags(self, only_name=False):
    paths = glob.glob(os.path.join(self.outfolder, flag_name("*")))
    if only_name:
        return [os.path.split(p)[1] for p in paths]
    else:
        return paths
[ "def", "list_flags", "(", "self", ",", "only_name", "=", "False", ")", ":", "paths", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "outfolder", ",", "flag_name", "(", "\"*\"", ")", ")", ")", "if", "only_name", ":"...
Determine the flag files associated with this pipeline. :param bool only_name: Whether to return only flag file name(s) (True), or full flag file paths (False); default False (paths) :return list[str]: flag files associated with this pipeline.
[ "Determine", "the", "flag", "files", "associated", "with", "this", "pipeline", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/pipeline.py#L216-L228
13,556
databio/pypiper
pypiper/pipeline.py
Pipeline._start_index
def _start_index(self, start=None):
    """ Seek to the first stage to run. """
    if start is None:
        return 0
    start_stage = translate_stage_name(start)
    internal_names = [translate_stage_name(s.name) for s in self._stages]
    try:
        return internal_names.index(start_stage)
    except ValueError:
        raise UnknownPipelineStageError(start, self)
python
def _start_index(self, start=None):
    if start is None:
        return 0
    start_stage = translate_stage_name(start)
    internal_names = [translate_stage_name(s.name) for s in self._stages]
    try:
        return internal_names.index(start_stage)
    except ValueError:
        raise UnknownPipelineStageError(start, self)
[ "def", "_start_index", "(", "self", ",", "start", "=", "None", ")", ":", "if", "start", "is", "None", ":", "return", "0", "start_stage", "=", "translate_stage_name", "(", "start", ")", "internal_names", "=", "[", "translate_stage_name", "(", "s", ".", "nam...
Seek to the first stage to run.
[ "Seek", "to", "the", "first", "stage", "to", "run", "." ]
00e6c2b94033c4187d47ff14c5580bbfc2ff097f
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/pipeline.py#L344-L353
13,557
googlefonts/glyphsLib
Lib/glyphsLib/builder/font.py
to_ufo_font_attributes
def to_ufo_font_attributes(self, family_name):
    """Generate a list of UFOs with metadata loaded from .glyphs data.

    Modifies the list of UFOs in the UFOBuilder (self) in-place.
    """
    font = self.font

    # "date" can be missing; Glyphs.app removes it on saving if it's empty:
    # https://github.com/googlei18n/glyphsLib/issues/134
    date_created = getattr(font, "date", None)
    if date_created is not None:
        date_created = to_ufo_time(date_created)
    units_per_em = font.upm
    version_major = font.versionMajor
    version_minor = font.versionMinor
    copyright = font.copyright
    designer = font.designer
    designer_url = font.designerURL
    manufacturer = font.manufacturer
    manufacturer_url = font.manufacturerURL
    # XXX note is unused?
    # note = font.note
    glyph_order = list(glyph.name for glyph in font.glyphs)

    for index, master in enumerate(font.masters):
        source = self._designspace.newSourceDescriptor()
        ufo = self.ufo_module.Font()
        source.font = ufo

        ufo.lib[APP_VERSION_LIB_KEY] = font.appVersion
        ufo.lib[KEYBOARD_INCREMENT_KEY] = font.keyboardIncrement

        if date_created is not None:
            ufo.info.openTypeHeadCreated = date_created
        ufo.info.unitsPerEm = units_per_em
        ufo.info.versionMajor = version_major
        ufo.info.versionMinor = version_minor

        if copyright:
            ufo.info.copyright = copyright
        if designer:
            ufo.info.openTypeNameDesigner = designer
        if designer_url:
            ufo.info.openTypeNameDesignerURL = designer_url
        if manufacturer:
            ufo.info.openTypeNameManufacturer = manufacturer
        if manufacturer_url:
            ufo.info.openTypeNameManufacturerURL = manufacturer_url

        ufo.glyphOrder = glyph_order

        self.to_ufo_names(ufo, master, family_name)
        self.to_ufo_family_user_data(ufo)
        self.to_ufo_custom_params(ufo, font)

        self.to_ufo_master_attributes(source, master)
        ufo.lib[MASTER_ORDER_LIB_KEY] = index

        # FIXME: (jany) in the future, yield this UFO (for memory, lazy iter)
        self._designspace.addSource(source)
        self._sources[master.id] = source
python
def to_ufo_font_attributes(self, family_name):
    font = self.font

    # "date" can be missing; Glyphs.app removes it on saving if it's empty:
    # https://github.com/googlei18n/glyphsLib/issues/134
    date_created = getattr(font, "date", None)
    if date_created is not None:
        date_created = to_ufo_time(date_created)
    units_per_em = font.upm
    version_major = font.versionMajor
    version_minor = font.versionMinor
    copyright = font.copyright
    designer = font.designer
    designer_url = font.designerURL
    manufacturer = font.manufacturer
    manufacturer_url = font.manufacturerURL
    # XXX note is unused?
    # note = font.note
    glyph_order = list(glyph.name for glyph in font.glyphs)

    for index, master in enumerate(font.masters):
        source = self._designspace.newSourceDescriptor()
        ufo = self.ufo_module.Font()
        source.font = ufo

        ufo.lib[APP_VERSION_LIB_KEY] = font.appVersion
        ufo.lib[KEYBOARD_INCREMENT_KEY] = font.keyboardIncrement

        if date_created is not None:
            ufo.info.openTypeHeadCreated = date_created
        ufo.info.unitsPerEm = units_per_em
        ufo.info.versionMajor = version_major
        ufo.info.versionMinor = version_minor

        if copyright:
            ufo.info.copyright = copyright
        if designer:
            ufo.info.openTypeNameDesigner = designer
        if designer_url:
            ufo.info.openTypeNameDesignerURL = designer_url
        if manufacturer:
            ufo.info.openTypeNameManufacturer = manufacturer
        if manufacturer_url:
            ufo.info.openTypeNameManufacturerURL = manufacturer_url

        ufo.glyphOrder = glyph_order

        self.to_ufo_names(ufo, master, family_name)
        self.to_ufo_family_user_data(ufo)
        self.to_ufo_custom_params(ufo, font)

        self.to_ufo_master_attributes(source, master)
        ufo.lib[MASTER_ORDER_LIB_KEY] = index

        # FIXME: (jany) in the future, yield this UFO (for memory, lazy iter)
        self._designspace.addSource(source)
        self._sources[master.id] = source
[ "def", "to_ufo_font_attributes", "(", "self", ",", "family_name", ")", ":", "font", "=", "self", ".", "font", "# \"date\" can be missing; Glyphs.app removes it on saving if it's empty:", "# https://github.com/googlei18n/glyphsLib/issues/134", "date_created", "=", "getattr", "(", ...
Generate a list of UFOs with metadata loaded from .glyphs data. Modifies the list of UFOs in the UFOBuilder (self) in-place.
[ "Generate", "a", "list", "of", "UFOs", "with", "metadata", "loaded", "from", ".", "glyphs", "data", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/font.py#L29-L90
13,558
googlefonts/glyphsLib
Lib/glyphsLib/builder/font.py
to_glyphs_font_attributes
def to_glyphs_font_attributes(self, source, master, is_initial):
    """
    Copy font attributes from `ufo` either to `self.font` or to `master`.

    Arguments:
    self -- The UFOBuilder
    ufo -- The current UFO being read
    master -- The current master being written
    is_initial -- True iff this is the first UFO that we process
    """
    if is_initial:
        _set_glyphs_font_attributes(self, source)
    else:
        _compare_and_merge_glyphs_font_attributes(self, source)
python
def to_glyphs_font_attributes(self, source, master, is_initial):
    if is_initial:
        _set_glyphs_font_attributes(self, source)
    else:
        _compare_and_merge_glyphs_font_attributes(self, source)
[ "def", "to_glyphs_font_attributes", "(", "self", ",", "source", ",", "master", ",", "is_initial", ")", ":", "if", "is_initial", ":", "_set_glyphs_font_attributes", "(", "self", ",", "source", ")", "else", ":", "_compare_and_merge_glyphs_font_attributes", "(", "self"...
Copy font attributes from `ufo` either to `self.font` or to `master`. Arguments: self -- The UFOBuilder ufo -- The current UFO being read master -- The current master being written is_initial -- True iff this is the first UFO that we process
[ "Copy", "font", "attributes", "from", "ufo", "either", "to", "self", ".", "font", "or", "to", "master", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/font.py#L93-L106
13,559
googlefonts/glyphsLib
Lib/glyphsLib/builder/glyph.py
to_ufo_glyph_background
def to_ufo_glyph_background(self, glyph, layer):
    """Set glyph background."""

    if not layer.hasBackground:
        return

    background = layer.background
    ufo_layer = self.to_ufo_background_layer(glyph)
    new_glyph = ufo_layer.newGlyph(glyph.name)

    width = background.userData[BACKGROUND_WIDTH_KEY]
    if width is not None:
        new_glyph.width = width

    self.to_ufo_background_image(new_glyph, background)
    self.to_ufo_paths(new_glyph, background)
    self.to_ufo_components(new_glyph, background)
    self.to_ufo_glyph_anchors(new_glyph, background.anchors)
    self.to_ufo_guidelines(new_glyph, background)
python
def to_ufo_glyph_background(self, glyph, layer):
    if not layer.hasBackground:
        return

    background = layer.background
    ufo_layer = self.to_ufo_background_layer(glyph)
    new_glyph = ufo_layer.newGlyph(glyph.name)

    width = background.userData[BACKGROUND_WIDTH_KEY]
    if width is not None:
        new_glyph.width = width

    self.to_ufo_background_image(new_glyph, background)
    self.to_ufo_paths(new_glyph, background)
    self.to_ufo_components(new_glyph, background)
    self.to_ufo_glyph_anchors(new_glyph, background.anchors)
    self.to_ufo_guidelines(new_glyph, background)
[ "def", "to_ufo_glyph_background", "(", "self", ",", "glyph", ",", "layer", ")", ":", "if", "not", "layer", ".", "hasBackground", ":", "return", "background", "=", "layer", ".", "background", "ufo_layer", "=", "self", ".", "to_ufo_background_layer", "(", "glyph...
Set glyph background.
[ "Set", "glyph", "background", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/glyph.py#L244-L262
13,560
googlefonts/glyphsLib
Lib/glyphsLib/builder/instances.py
to_designspace_instances
def to_designspace_instances(self):
    """Write instance data from self.font to self.designspace."""
    for instance in self.font.instances:
        if self.minimize_glyphs_diffs or (
            is_instance_active(instance)
            and _is_instance_included_in_family(self, instance)
        ):
            _to_designspace_instance(self, instance)
python
def to_designspace_instances(self):
    for instance in self.font.instances:
        if self.minimize_glyphs_diffs or (
            is_instance_active(instance)
            and _is_instance_included_in_family(self, instance)
        ):
            _to_designspace_instance(self, instance)
[ "def", "to_designspace_instances", "(", "self", ")", ":", "for", "instance", "in", "self", ".", "font", ".", "instances", ":", "if", "self", ".", "minimize_glyphs_diffs", "or", "(", "is_instance_active", "(", "instance", ")", "and", "_is_instance_included_in_famil...
Write instance data from self.font to self.designspace.
[ "Write", "instance", "data", "from", "self", ".", "font", "to", "self", ".", "designspace", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/instances.py#L49-L56
13,561
googlefonts/glyphsLib
Lib/glyphsLib/builder/instances.py
apply_instance_data
def apply_instance_data(designspace, include_filenames=None, Font=defcon.Font):
    """Open UFO instances referenced by designspace, apply Glyphs instance
    data if present, re-save UFOs and return updated UFO Font objects.

    Args:
        designspace: DesignSpaceDocument object or path (str or PathLike) to
            a designspace file.
        include_filenames: optional set of instance filenames (relative to
            the designspace path) to be included. By default all instances
            are processed.
        Font: the class used to load the UFO (default: defcon.Font).

    Returns:
        List of opened and updated instance UFOs.
    """
    from fontTools.designspaceLib import DesignSpaceDocument
    from os.path import normcase, normpath

    if hasattr(designspace, "__fspath__"):
        designspace = designspace.__fspath__()
    if isinstance(designspace, basestring):
        designspace = DesignSpaceDocument.fromfile(designspace)

    basedir = os.path.dirname(designspace.path)
    instance_ufos = []
    if include_filenames is not None:
        include_filenames = {normcase(normpath(p)) for p in include_filenames}

    for designspace_instance in designspace.instances:
        fname = designspace_instance.filename
        assert fname is not None, "instance %r missing required filename" % getattr(
            designspace_instance, "name", designspace_instance
        )
        if include_filenames is not None:
            fname = normcase(normpath(fname))
            if fname not in include_filenames:
                continue

        logger.debug("Applying instance data to %s", fname)
        # fontmake <= 1.4.0 compares the ufo paths returned from this function
        # to the keys of a dict of designspace locations that have been passed
        # through normpath (but not normcase). We do the same.
        ufo = Font(normpath(os.path.join(basedir, fname)))

        set_weight_class(ufo, designspace, designspace_instance)
        set_width_class(ufo, designspace, designspace_instance)

        glyphs_instance = InstanceDescriptorAsGSInstance(designspace_instance)
        to_ufo_custom_params(None, ufo, glyphs_instance)

        ufo.save()
        instance_ufos.append(ufo)

    return instance_ufos
python
def apply_instance_data(designspace, include_filenames=None, Font=defcon.Font):
    from fontTools.designspaceLib import DesignSpaceDocument
    from os.path import normcase, normpath

    if hasattr(designspace, "__fspath__"):
        designspace = designspace.__fspath__()
    if isinstance(designspace, basestring):
        designspace = DesignSpaceDocument.fromfile(designspace)

    basedir = os.path.dirname(designspace.path)
    instance_ufos = []
    if include_filenames is not None:
        include_filenames = {normcase(normpath(p)) for p in include_filenames}

    for designspace_instance in designspace.instances:
        fname = designspace_instance.filename
        assert fname is not None, "instance %r missing required filename" % getattr(
            designspace_instance, "name", designspace_instance
        )
        if include_filenames is not None:
            fname = normcase(normpath(fname))
            if fname not in include_filenames:
                continue

        logger.debug("Applying instance data to %s", fname)
        # fontmake <= 1.4.0 compares the ufo paths returned from this function
        # to the keys of a dict of designspace locations that have been passed
        # through normpath (but not normcase). We do the same.
        ufo = Font(normpath(os.path.join(basedir, fname)))

        set_weight_class(ufo, designspace, designspace_instance)
        set_width_class(ufo, designspace, designspace_instance)

        glyphs_instance = InstanceDescriptorAsGSInstance(designspace_instance)
        to_ufo_custom_params(None, ufo, glyphs_instance)

        ufo.save()
        instance_ufos.append(ufo)

    return instance_ufos
[ "def", "apply_instance_data", "(", "designspace", ",", "include_filenames", "=", "None", ",", "Font", "=", "defcon", ".", "Font", ")", ":", "from", "fontTools", ".", "designspaceLib", "import", "DesignSpaceDocument", "from", "os", ".", "path", "import", "normcas...
Open UFO instances referenced by designspace, apply Glyphs instance data if present, re-save UFOs and return updated UFO Font objects. Args: designspace: DesignSpaceDocument object or path (str or PathLike) to a designspace file. include_filenames: optional set of instance filenames (relative to the designspace path) to be included. By default all instances are processed. Font: the class used to load the UFO (default: defcon.Font). Returns: List of opened and updated instance UFOs.
[ "Open", "UFO", "instances", "referenced", "by", "designspace", "apply", "Glyphs", "instance", "data", "if", "present", "re", "-", "save", "UFOs", "and", "return", "updated", "UFO", "Font", "objects", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/instances.py#L334-L384
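A hypothetical call, assuming a designspace document whose instances carry Glyphs instance data; the paths and filenames are made up:

from glyphsLib.builder.instances import apply_instance_data

ufos = apply_instance_data(
    "MyFamily.designspace",
    include_filenames={"instance_ufo/MyFamily-Regular.ufo"},
)
for ufo in ufos:
    print(ufo.path, ufo.info.openTypeOS2WeightClass)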
13,562
googlefonts/glyphsLib
Lib/glyphsLib/builder/features.py
_to_ufo_features
def _to_ufo_features(self, master, ufo):
    """Write a UFO's OpenType feature file."""

    # Recover the original feature code if it was stored in the user data
    original = master.userData[ORIGINAL_FEATURE_CODE_KEY]
    if original is not None:
        ufo.features.text = original
        return

    prefixes = []
    for prefix in self.font.featurePrefixes:
        strings = []
        if prefix.name != ANONYMOUS_FEATURE_PREFIX_NAME:
            strings.append("# Prefix: %s\n" % prefix.name)
        strings.append(autostr(prefix.automatic))
        strings.append(prefix.code)
        prefixes.append("".join(strings))

    prefix_str = "\n\n".join(prefixes)

    class_defs = []
    for class_ in self.font.classes:
        prefix = "@" if not class_.name.startswith("@") else ""
        name = prefix + class_.name
        class_defs.append(
            "{}{} = [ {} ];".format(autostr(class_.automatic), name, class_.code)
        )
    class_str = "\n\n".join(class_defs)

    feature_defs = []
    for feature in self.font.features:
        code = feature.code
        lines = ["feature %s {" % feature.name]
        if feature.notes:
            lines.append("# notes:")
            lines.extend("# " + line for line in feature.notes.splitlines())
        if feature.automatic:
            lines.append("# automatic")
        if feature.disabled:
            lines.append("# disabled")
            lines.extend("#" + line for line in code.splitlines())
        else:
            lines.append(code)
        lines.append("} %s;" % feature.name)
        feature_defs.append("\n".join(lines))
    fea_str = "\n\n".join(feature_defs)

    # Don't add a GDEF table when planning to round-trip. To get Glyphs.app-like
    # results, we would need anchor propagation or user intervention. Glyphs.app
    # only generates it on generating binaries.
    gdef_str = None
    if self.generate_GDEF:
        if re.search(r"^\s*table\s+GDEF\s+{", prefix_str, flags=re.MULTILINE):
            raise ValueError(
                "The features already contain a `table GDEF {...}` statement. "
                "Either delete it or set generate_GDEF to False."
            )
        gdef_str = _build_gdef(
            ufo, self._designspace.lib.get("public.skipExportGlyphs")
        )

    # make sure feature text is a unicode string, for defcon
    full_text = (
        "\n\n".join(filter(None, [class_str, prefix_str, fea_str, gdef_str]))
        + "\n"
    )
    ufo.features.text = full_text if full_text.strip() else ""
python
def _to_ufo_features(self, master, ufo):
    # Recover the original feature code if it was stored in the user data
    original = master.userData[ORIGINAL_FEATURE_CODE_KEY]
    if original is not None:
        ufo.features.text = original
        return

    prefixes = []
    for prefix in self.font.featurePrefixes:
        strings = []
        if prefix.name != ANONYMOUS_FEATURE_PREFIX_NAME:
            strings.append("# Prefix: %s\n" % prefix.name)
        strings.append(autostr(prefix.automatic))
        strings.append(prefix.code)
        prefixes.append("".join(strings))

    prefix_str = "\n\n".join(prefixes)

    class_defs = []
    for class_ in self.font.classes:
        prefix = "@" if not class_.name.startswith("@") else ""
        name = prefix + class_.name
        class_defs.append(
            "{}{} = [ {} ];".format(autostr(class_.automatic), name, class_.code)
        )
    class_str = "\n\n".join(class_defs)

    feature_defs = []
    for feature in self.font.features:
        code = feature.code
        lines = ["feature %s {" % feature.name]
        if feature.notes:
            lines.append("# notes:")
            lines.extend("# " + line for line in feature.notes.splitlines())
        if feature.automatic:
            lines.append("# automatic")
        if feature.disabled:
            lines.append("# disabled")
            lines.extend("#" + line for line in code.splitlines())
        else:
            lines.append(code)
        lines.append("} %s;" % feature.name)
        feature_defs.append("\n".join(lines))
    fea_str = "\n\n".join(feature_defs)

    # Don't add a GDEF table when planning to round-trip. To get Glyphs.app-like
    # results, we would need anchor propagation or user intervention. Glyphs.app
    # only generates it on generating binaries.
    gdef_str = None
    if self.generate_GDEF:
        if re.search(r"^\s*table\s+GDEF\s+{", prefix_str, flags=re.MULTILINE):
            raise ValueError(
                "The features already contain a `table GDEF {...}` statement. "
                "Either delete it or set generate_GDEF to False."
            )
        gdef_str = _build_gdef(
            ufo, self._designspace.lib.get("public.skipExportGlyphs")
        )

    # make sure feature text is a unicode string, for defcon
    full_text = (
        "\n\n".join(filter(None, [class_str, prefix_str, fea_str, gdef_str]))
        + "\n"
    )
    ufo.features.text = full_text if full_text.strip() else ""
[ "def", "_to_ufo_features", "(", "self", ",", "master", ",", "ufo", ")", ":", "# Recover the original feature code if it was stored in the user data", "original", "=", "master", ".", "userData", "[", "ORIGINAL_FEATURE_CODE_KEY", "]", "if", "original", "is", "not", "None"...
Write a UFO's OpenType feature file.
[ "Write", "an", "UFO", "s", "OpenType", "feature", "file", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/features.py#L42-L107
13,563
googlefonts/glyphsLib
Lib/glyphsLib/builder/features.py
FeatureFileProcessor._pop_comment
def _pop_comment(self, statements, comment_re):
    """Look for the comment that matches the given regex.

    If it matches, return the regex match object and list of statements
    without the special one.
    """
    res = []
    match = None
    for st in statements:
        if match or not isinstance(st, ast.Comment):
            res.append(st)
            continue
        match = comment_re.match(st.text)
        if not match:
            res.append(st)
    return match, res
python
def _pop_comment(self, statements, comment_re):
    res = []
    match = None
    for st in statements:
        if match or not isinstance(st, ast.Comment):
            res.append(st)
            continue
        match = comment_re.match(st.text)
        if not match:
            res.append(st)
    return match, res
[ "def", "_pop_comment", "(", "self", ",", "statements", ",", "comment_re", ")", ":", "res", "=", "[", "]", "match", "=", "None", "for", "st", "in", "statements", ":", "if", "match", "or", "not", "isinstance", "(", "st", ",", "ast", ".", "Comment", ")"...
Look for the comment that matches the given regex. If it matches, return the regex match object and list of statements without the special one.
[ "Look", "for", "the", "comment", "that", "matches", "the", "given", "regex", ".", "If", "it", "matches", "return", "the", "regex", "match", "object", "and", "list", "of", "statements", "without", "the", "special", "one", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/features.py#L566-L580
13,564
googlefonts/glyphsLib
Lib/glyphsLib/builder/features.py
FeatureFileProcessor._pop_comment_block
def _pop_comment_block(self, statements, header_re):
    """Look for a series of comments that start with one that matches the regex.

    If the first comment is found, all subsequent comments are popped from
    statements, concatenated and dedented and returned.
    """
    res = []
    comments = []
    match = None
    st_iter = iter(statements)

    # Look for the header
    for st in st_iter:
        if isinstance(st, ast.Comment):
            match = header_re.match(st.text)
            if match:
                # Drop this comment and move on to consuming the block
                break
            else:
                res.append(st)
        else:
            res.append(st)

    # Consume consecutive comments
    for st in st_iter:
        if isinstance(st, ast.Comment):
            comments.append(st)
        else:
            # The block is over, keep the rest of the statements
            res.append(st)
            break

    # Keep the rest of the statements
    res.extend(list(st_iter))

    # Inside the comment block, drop the pound sign and any common indent
    return match, dedent("".join(c.text[1:] + "\n" for c in comments)), res
python
def _pop_comment_block(self, statements, header_re):
    res = []
    comments = []
    match = None
    st_iter = iter(statements)

    # Look for the header
    for st in st_iter:
        if isinstance(st, ast.Comment):
            match = header_re.match(st.text)
            if match:
                # Drop this comment and move on to consuming the block
                break
            else:
                res.append(st)
        else:
            res.append(st)

    # Consume consecutive comments
    for st in st_iter:
        if isinstance(st, ast.Comment):
            comments.append(st)
        else:
            # The block is over, keep the rest of the statements
            res.append(st)
            break

    # Keep the rest of the statements
    res.extend(list(st_iter))

    # Inside the comment block, drop the pound sign and any common indent
    return match, dedent("".join(c.text[1:] + "\n" for c in comments)), res
[ "def", "_pop_comment_block", "(", "self", ",", "statements", ",", "header_re", ")", ":", "res", "=", "[", "]", "comments", "=", "[", "]", "match", "=", "None", "st_iter", "=", "iter", "(", "statements", ")", "# Look for the header", "for", "st", "in", "s...
Look for a series of comments that start with one that matches the regex. If the first comment is found, all subsequent comments are popped from statements, concatenated and dedented and returned.
[ "Look", "for", "a", "series", "of", "comments", "that", "start", "with", "one", "that", "matches", "the", "regex", ".", "If", "the", "first", "comment", "is", "found", "all", "subsequent", "comments", "are", "popped", "from", "statements", "concatenated", "a...
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/features.py#L582-L613
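The final return line does two things at once: it strips each comment's leading "#" and then removes the common indent. Reproduced on plain strings standing in for feaLib Comment nodes:

from textwrap import dedent

comment_texts = ["#   Prefix: foo", "#     sub a by b;", "#   end"]
block = dedent("".join(text[1:] + "\n" for text in comment_texts))
print(block)   # "Prefix: foo\n  sub a by b;\nend\n" -- 3-space common indent removed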
13,565
4Catalyzer/flask-resty
flask_resty/spec/declaration.py
ApiViewDeclaration.get_marshmallow_schema_name
def get_marshmallow_schema_name(self, plugin, schema):
    """Get the schema name.

    If the schema doesn't exist, create it.
    """
    try:
        return plugin.openapi.refs[schema]
    except KeyError:
        plugin.spec.definition(schema.__name__, schema=schema)
        return schema.__name__
python
def get_marshmallow_schema_name(self, plugin, schema):
    try:
        return plugin.openapi.refs[schema]
    except KeyError:
        plugin.spec.definition(schema.__name__, schema=schema)
        return schema.__name__
[ "def", "get_marshmallow_schema_name", "(", "self", ",", "plugin", ",", "schema", ")", ":", "try", ":", "return", "plugin", ".", "openapi", ".", "refs", "[", "schema", "]", "except", "KeyError", ":", "plugin", ".", "spec", ".", "definition", "(", "schema", ...
Get the schema name. If the schema doesn't exist, create it.
[ "Get", "the", "schema", "name", "." ]
a8b6502a799c270ca9ce41c6d8b7297713942097
https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/spec/declaration.py#L85-L94
13,566
googlefonts/glyphsLib
Lib/glyphsLib/builder/components.py
to_ufo_components
def to_ufo_components(self, ufo_glyph, layer):
    """Draw .glyphs components onto a pen, adding them to the parent glyph."""
    pen = ufo_glyph.getPointPen()

    for index, component in enumerate(layer.components):
        pen.addComponent(component.name, component.transform)
        if component.anchor:
            if COMPONENT_INFO_KEY not in ufo_glyph.lib:
                ufo_glyph.lib[COMPONENT_INFO_KEY] = []
            ufo_glyph.lib[COMPONENT_INFO_KEY].append(
                {"name": component.name, "index": index, "anchor": component.anchor}
            )

    # data related to components stored in lists of booleans
    # each list's elements correspond to the components in order
    for key in ["alignment", "locked", "smartComponentValues"]:
        values = [getattr(c, key) for c in layer.components]
        if any(values):
            ufo_glyph.lib[_lib_key(key)] = values
python
def to_ufo_components(self, ufo_glyph, layer):
    pen = ufo_glyph.getPointPen()

    for index, component in enumerate(layer.components):
        pen.addComponent(component.name, component.transform)
        if component.anchor:
            if COMPONENT_INFO_KEY not in ufo_glyph.lib:
                ufo_glyph.lib[COMPONENT_INFO_KEY] = []
            ufo_glyph.lib[COMPONENT_INFO_KEY].append(
                {"name": component.name, "index": index, "anchor": component.anchor}
            )

    # data related to components stored in lists of booleans
    # each list's elements correspond to the components in order
    for key in ["alignment", "locked", "smartComponentValues"]:
        values = [getattr(c, key) for c in layer.components]
        if any(values):
            ufo_glyph.lib[_lib_key(key)] = values
[ "def", "to_ufo_components", "(", "self", ",", "ufo_glyph", ",", "layer", ")", ":", "pen", "=", "ufo_glyph", ".", "getPointPen", "(", ")", "for", "index", ",", "component", "in", "enumerate", "(", "layer", ".", "components", ")", ":", "pen", ".", "addComp...
Draw .glyphs components onto a pen, adding them to the parent glyph.
[ "Draw", ".", "glyphs", "components", "onto", "a", "pen", "adding", "them", "to", "the", "parent", "glyph", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/components.py#L26-L45
13,567
4Catalyzer/flask-resty
flask_resty/view.py
ApiView.request_args
def request_args(self): """Use args_schema to parse request query arguments.""" args = flask.request.args data_raw = {} for field_name, field in self.args_schema.fields.items(): alternate_field_name = field.load_from if MA2 else field.data_key if alternate_field_name and alternate_field_name in args: field_name = alternate_field_name elif field_name not in args: # getlist will return an empty list instead of raising a # KeyError for args that aren't present. continue value = args.getlist(field_name) if not self.is_list_field(field) and len(value) == 1: value = value[0] data_raw[field_name] = value return self.deserialize_args(data_raw)
python
def request_args(self): args = flask.request.args data_raw = {} for field_name, field in self.args_schema.fields.items(): alternate_field_name = field.load_from if MA2 else field.data_key if alternate_field_name and alternate_field_name in args: field_name = alternate_field_name elif field_name not in args: # getlist will return an empty list instead of raising a # KeyError for args that aren't present. continue value = args.getlist(field_name) if not self.is_list_field(field) and len(value) == 1: value = value[0] data_raw[field_name] = value return self.deserialize_args(data_raw)
[ "def", "request_args", "(", "self", ")", ":", "args", "=", "flask", ".", "request", ".", "args", "data_raw", "=", "{", "}", "for", "field_name", ",", "field", "in", "self", ".", "args_schema", ".", "fields", ".", "items", "(", ")", ":", "alternate_fiel...
Use args_schema to parse request query arguments.
[ "Use", "args_schema", "to", "parse", "request", "query", "arguments", "." ]
a8b6502a799c270ca9ce41c6d8b7297713942097
https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/view.py#L167-L188
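A minimal usage sketch for the record above, assuming flask-resty exposes ApiView at the top level and that request_args is accessed as a property; WidgetArgsSchema and WidgetListView are hypothetical names introduced only for illustration:

from marshmallow import Schema, fields
from flask_resty import ApiView

class WidgetArgsSchema(Schema):
    # ?name=foo      -> 'foo'       (a single scalar value is unwrapped)
    name = fields.String()
    # ?tag=a&tag=b   -> ['a', 'b']  (list fields keep every repeated value)
    tag = fields.List(fields.String())

class WidgetListView(ApiView):
    args_schema = WidgetArgsSchema()

    def get(self):
        filters = self.request_args  # deserialized per args_schema, as in the record
        ...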
13,568
4Catalyzer/flask-resty
flask_resty/view.py
ModelView.query
def query(self): """The SQLAlchemy query for the view. Override this to customize the query to fetch items in this view. By default, this applies the filter from the view's `authorization` and the query options from `base_query_options` and `query_options`. """ query = self.query_raw query = self.authorization.filter_query(query, self) query = query.options( *itertools.chain(self.base_query_options, self.query_options) ) return query
python
def query(self): query = self.query_raw query = self.authorization.filter_query(query, self) query = query.options( *itertools.chain(self.base_query_options, self.query_options) ) return query
[ "def", "query", "(", "self", ")", ":", "query", "=", "self", ".", "query_raw", "query", "=", "self", ".", "authorization", ".", "filter_query", "(", "query", ",", "self", ")", "query", "=", "query", ".", "options", "(", "*", "itertools", ".", "chain", ...
The SQLAlchemy query for the view. Override this to customize the query to fetch items in this view. By default, this applies the filter from the view's `authorization` and the query options from `base_query_options` and `query_options`.
[ "The", "SQLAlchemy", "query", "for", "the", "view", "." ]
a8b6502a799c270ca9ce41c6d8b7297713942097
https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/view.py#L245-L259
13,569
4Catalyzer/flask-resty
flask_resty/view.py
ModelView.query_options
def query_options(self): """Options to apply to the query for the view. Set this to configure relationship and column loading. By default, this calls the ``get_query_options`` method on the serializer with a `Load` object bound to the model, if that serializer method exists. """ if not hasattr(self.serializer, 'get_query_options'): return () return self.serializer.get_query_options(Load(self.model))
python
def query_options(self): if not hasattr(self.serializer, 'get_query_options'): return () return self.serializer.get_query_options(Load(self.model))
[ "def", "query_options", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ".", "serializer", ",", "'get_query_options'", ")", ":", "return", "(", ")", "return", "self", ".", "serializer", ".", "get_query_options", "(", "Load", "(", "self", ".", ...
Options to apply to the query for the view. Set this to configure relationship and column loading. By default, this calls the ``get_query_options`` method on the serializer with a `Load` object bound to the model, if that serializer method exists.
[ "Options", "to", "apply", "to", "the", "query", "for", "the", "view", "." ]
a8b6502a799c270ca9ce41c6d8b7297713942097
https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/view.py#L273-L285
13,570
googlefonts/glyphsLib
Lib/glyphsLib/builder/paths.py
to_ufo_paths
def to_ufo_paths(self, ufo_glyph, layer): """Draw .glyphs paths onto a pen.""" pen = ufo_glyph.getPointPen() for path in layer.paths: # the list is changed below, otherwise you can't draw more than once # per session. nodes = list(path.nodes) for node in nodes: self.to_ufo_node_user_data(ufo_glyph, node) pen.beginPath() if not nodes: pen.endPath() continue if not path.closed: node = nodes.pop(0) assert node.type == "line", "Open path starts with off-curve points" pen.addPoint(tuple(node.position), segmentType="move") else: # In Glyphs.app, the starting node of a closed contour is always # stored at the end of the nodes list. nodes.insert(0, nodes.pop()) for node in nodes: node_type = _to_ufo_node_type(node.type) pen.addPoint( tuple(node.position), segmentType=node_type, smooth=node.smooth ) pen.endPath()
python
def to_ufo_paths(self, ufo_glyph, layer): pen = ufo_glyph.getPointPen() for path in layer.paths: # the list is changed below, otherwise you can't draw more than once # per session. nodes = list(path.nodes) for node in nodes: self.to_ufo_node_user_data(ufo_glyph, node) pen.beginPath() if not nodes: pen.endPath() continue if not path.closed: node = nodes.pop(0) assert node.type == "line", "Open path starts with off-curve points" pen.addPoint(tuple(node.position), segmentType="move") else: # In Glyphs.app, the starting node of a closed contour is always # stored at the end of the nodes list. nodes.insert(0, nodes.pop()) for node in nodes: node_type = _to_ufo_node_type(node.type) pen.addPoint( tuple(node.position), segmentType=node_type, smooth=node.smooth ) pen.endPath()
[ "def", "to_ufo_paths", "(", "self", ",", "ufo_glyph", ",", "layer", ")", ":", "pen", "=", "ufo_glyph", ".", "getPointPen", "(", ")", "for", "path", "in", "layer", ".", "paths", ":", "# the list is changed below, otherwise you can't draw more than once", "# per sessi...
Draw .glyphs paths onto a pen.
[ "Draw", ".", "glyphs", "paths", "onto", "a", "pen", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/paths.py#L21-L49
13,571
4Catalyzer/flask-resty
flask_resty/decorators.py
request_cached_property
def request_cached_property(func): """Make the given method a per-request cached property. This caches the value on the request context rather than on the object itself, preventing problems if the object gets reused across multiple requests. """ @property @functools.wraps(func) def wrapped(self): cached_value = context.get_for_view(self, func.__name__, UNDEFINED) if cached_value is not UNDEFINED: return cached_value value = func(self) context.set_for_view(self, func.__name__, value) return value return wrapped
python
def request_cached_property(func): @property @functools.wraps(func) def wrapped(self): cached_value = context.get_for_view(self, func.__name__, UNDEFINED) if cached_value is not UNDEFINED: return cached_value value = func(self) context.set_for_view(self, func.__name__, value) return value return wrapped
[ "def", "request_cached_property", "(", "func", ")", ":", "@", "property", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "self", ")", ":", "cached_value", "=", "context", ".", "get_for_view", "(", "self", ",", "func", ".", "__na...
Make the given method a per-request cached property. This caches the value on the request context rather than on the object itself, preventing problems if the object gets reused across multiple requests.
[ "Make", "the", "given", "method", "a", "per", "-", "request", "cached", "property", "." ]
a8b6502a799c270ca9ce41c6d8b7297713942097
https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/decorators.py#L31-L50
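A sketch of how the decorator above is typically applied, assuming it is importable from flask_resty.decorators as the record's path suggests; expensive_lookup is a hypothetical helper:

from flask_resty.decorators import request_cached_property

class ItemView:
    @request_cached_property
    def current_item(self):
        # Computed at most once per request; later accesses within the same
        # request return the value cached on the request context, not on self,
        # so a view instance reused across requests stays safe.
        return expensive_lookup()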
13,572
googlefonts/glyphsLib
Lib/glyphsLib/builder/groups.py
_ufo_logging_ref
def _ufo_logging_ref(ufo): """Return a string that can identify this UFO in logs.""" if ufo.path: return os.path.basename(ufo.path) return ufo.info.styleName
python
def _ufo_logging_ref(ufo): if ufo.path: return os.path.basename(ufo.path) return ufo.info.styleName
[ "def", "_ufo_logging_ref", "(", "ufo", ")", ":", "if", "ufo", ".", "path", ":", "return", "os", ".", "path", ".", "basename", "(", "ufo", ".", "path", ")", "return", "ufo", ".", "info", ".", "styleName" ]
Return a string that can identify this UFO in logs.
[ "Return", "a", "string", "that", "can", "identify", "this", "UFO", "in", "logs", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/groups.py#L180-L184
13,573
googlefonts/glyphsLib
Lib/glyphsLib/types.py
parse_datetime
def parse_datetime(src=None): """Parse a datetime object from a string.""" if src is None: return None string = src.replace('"', "") # parse timezone ourselves, since %z is not always supported # see: http://bugs.python.org/issue6641 m = UTC_OFFSET_RE.match(string) if m: sign = 1 if m.group("sign") == "+" else -1 tz_hours = sign * int(m.group("hours")) tz_minutes = sign * int(m.group("minutes")) offset = datetime.timedelta(hours=tz_hours, minutes=tz_minutes) string = string[:-6] else: # no explicit timezone offset = datetime.timedelta(0) if "AM" in string or "PM" in string: datetime_obj = datetime.datetime.strptime(string, "%Y-%m-%d %I:%M:%S %p") else: datetime_obj = datetime.datetime.strptime(string, "%Y-%m-%d %H:%M:%S") return datetime_obj + offset
python
def parse_datetime(src=None): if src is None: return None string = src.replace('"', "") # parse timezone ourselves, since %z is not always supported # see: http://bugs.python.org/issue6641 m = UTC_OFFSET_RE.match(string) if m: sign = 1 if m.group("sign") == "+" else -1 tz_hours = sign * int(m.group("hours")) tz_minutes = sign * int(m.group("minutes")) offset = datetime.timedelta(hours=tz_hours, minutes=tz_minutes) string = string[:-6] else: # no explicit timezone offset = datetime.timedelta(0) if "AM" in string or "PM" in string: datetime_obj = datetime.datetime.strptime(string, "%Y-%m-%d %I:%M:%S %p") else: datetime_obj = datetime.datetime.strptime(string, "%Y-%m-%d %H:%M:%S") return datetime_obj + offset
[ "def", "parse_datetime", "(", "src", "=", "None", ")", ":", "if", "src", "is", "None", ":", "return", "None", "string", "=", "src", ".", "replace", "(", "'\"'", ",", "\"\"", ")", "# parse timezone ourselves, since %z is not always supported", "# see: http://bugs.p...
Parse a datetime object from a string.
[ "Parse", "a", "datetime", "object", "from", "a", "string", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/types.py#L267-L288
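A hedged usage sketch of parse_datetime from the record above, assuming glyphsLib is importable and that UTC_OFFSET_RE matches a trailing " +HHMM"/" -HHMM" suffix as the slicing implies. Note the code adds the parsed offset to the naive datetime rather than using %z:

from glyphsLib.types import parse_datetime

# The trailing "+0200" is stripped, turned into a timedelta, and added on:
parse_datetime('2017-01-01 14:30:00 +0200')  # -> datetime(2017, 1, 1, 16, 30)

# 12-hour timestamps with AM/PM are also accepted:
parse_datetime('2017-01-01 02:30:00 PM')     # -> datetime(2017, 1, 1, 14, 30)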
13,574
googlefonts/glyphsLib
Lib/glyphsLib/types.py
parse_color
def parse_color(src=None): # type: (Optional[str]) -> Optional[Union[Tuple[int, ...], int]] """Parse a string representing a color value. Color is either a fixed color (when coloring something from the UI, see the GLYPHS_COLORS constant) or a list of the format [u8, u8, u8, u8]. Glyphs does not support an alpha channel as of 2.5.1 (confirmed by Georg Seifert), and always writes a 1 to it. This was brought up and will probably be corrected in a future version. https://github.com/googlei18n/glyphsLib/pull/363#issuecomment-390418497 """ if src is None: return None # Tuple. if src[0] == "(": rgba = tuple(int(v) for v in src[1:-1].split(",") if v) if not (len(rgba) == 4 and all(0 <= v < 256 for v in rgba)): raise ValueError( "Broken color tuple: {}. Must have four values from 0 to 255.".format( src ) ) return rgba # Constant. return int(src)
python
def parse_color(src=None): # type: (Optional[str]) -> Optional[Union[Tuple[int, ...], int]] if src is None: return None # Tuple. if src[0] == "(": rgba = tuple(int(v) for v in src[1:-1].split(",") if v) if not (len(rgba) == 4 and all(0 <= v < 256 for v in rgba)): raise ValueError( "Broken color tuple: {}. Must have four values from 0 to 255.".format( src ) ) return rgba # Constant. return int(src)
[ "def", "parse_color", "(", "src", "=", "None", ")", ":", "# type: (Optional[str]) -> Optional[Union[Tuple[int, ...], int]]", "if", "src", "is", "None", ":", "return", "None", "# Tuple.", "if", "src", "[", "0", "]", "==", "\"(\"", ":", "rgba", "=", "tuple", "("...
Parse a string representing a color value. Color is either a fixed color (when coloring something from the UI, see the GLYPHS_COLORS constant) or a list of the format [u8, u8, u8, u8]. Glyphs does not support an alpha channel as of 2.5.1 (confirmed by Georg Seifert), and always writes a 1 to it. This was brought up and will probably be corrected in a future version. https://github.com/googlei18n/glyphsLib/pull/363#issuecomment-390418497
[ "Parse", "a", "string", "representing", "a", "color", "value", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/types.py#L305-L334
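Worked examples for parse_color, traced directly from the record's own logic (assuming glyphsLib is importable):

from glyphsLib.types import parse_color

parse_color("(255, 0, 0, 1)")  # -> (255, 0, 0, 1): an RGBA tuple of 0-255 ints
parse_color("5")               # -> 5: an index into the fixed UI color palette
parse_color(None)              # -> None
parse_color("(300, 0, 0, 1)")  # raises ValueError: values must be 0 to 255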
13,575
googlefonts/glyphsLib
Lib/glyphsLib/parser.py
Parser.parse
def parse(self, text): """Do the parsing.""" text = tounicode(text, encoding="utf-8") result, i = self._parse(text, 0) if text[i:].strip(): self._fail("Unexpected trailing content", text, i) return result
python
def parse(self, text): text = tounicode(text, encoding="utf-8") result, i = self._parse(text, 0) if text[i:].strip(): self._fail("Unexpected trailing content", text, i) return result
[ "def", "parse", "(", "self", ",", "text", ")", ":", "text", "=", "tounicode", "(", "text", ",", "encoding", "=", "\"utf-8\"", ")", "result", ",", "i", "=", "self", ".", "_parse", "(", "text", ",", "0", ")", "if", "text", "[", "i", ":", "]", "."...
Do the parsing.
[ "Do", "the", "parsing", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/parser.py#L48-L55
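A hedged usage sketch for Parser.parse, which consumes Glyphs' NeXTSTEP-style plist text; the exact mapping type of the result depends on the Parser's configured current_type (see the _parse_dict record further below):

from glyphsLib.parser import Parser

text = '{\nname = "My Font";\nversionMajor = 1;\n}'
result = Parser().parse(text)
# result maps 'name' -> 'My Font' and 'versionMajor' -> 1;
# non-whitespace trailing content after the closing brace raises ValueError.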
13,576
googlefonts/glyphsLib
Lib/glyphsLib/parser.py
Parser.parse_into_object
def parse_into_object(self, res, text): """Parse data into an existing GSFont instance.""" text = tounicode(text, encoding="utf-8") m = self.start_dict_re.match(text, 0) if m: i = self._parse_dict_into_object(res, text, 1) else: self._fail("not correct file format", text, 0) if text[i:].strip(): self._fail("Unexpected trailing content", text, i) return i
python
def parse_into_object(self, res, text): text = tounicode(text, encoding="utf-8") m = self.start_dict_re.match(text, 0) if m: i = self._parse_dict_into_object(res, text, 1) else: self._fail("not correct file format", text, 0) if text[i:].strip(): self._fail("Unexpected trailing content", text, i) return i
[ "def", "parse_into_object", "(", "self", ",", "res", ",", "text", ")", ":", "text", "=", "tounicode", "(", "text", ",", "encoding", "=", "\"utf-8\"", ")", "m", "=", "self", ".", "start_dict_re", ".", "match", "(", "text", ",", "0", ")", "if", "m", ...
Parse data into an existing GSFont instance.
[ "Parse", "data", "into", "an", "existing", "GSFont", "instance", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/parser.py#L57-L69
13,577
googlefonts/glyphsLib
Lib/glyphsLib/parser.py
Parser._parse
def _parse(self, text, i): """Recursive function to parse a single dictionary, list, or value.""" m = self.start_dict_re.match(text, i) if m: parsed = m.group(0) i += len(parsed) return self._parse_dict(text, i) m = self.start_list_re.match(text, i) if m: parsed = m.group(0) i += len(parsed) return self._parse_list(text, i) m = self.value_re.match(text, i) if m: parsed = m.group(0) i += len(parsed) if hasattr(self.current_type, "read"): reader = self.current_type() # Give the escaped value to `read` to be symmetrical with # `plistValue` which handles the escaping itself. value = reader.read(m.group(1)) return value, i value = self._trim_value(m.group(1)) if self.current_type in (None, dict, OrderedDict): self.current_type = self._guess_current_type(parsed, value) if self.current_type == bool: value = bool(int(value)) # bool(u'0') returns True return value, i value = self.current_type(value) return value, i m = self.hex_re.match(text, i) if m: from glyphsLib.types import BinaryData parsed, value = m.group(0), m.group(1) decoded = BinaryData.fromHex(value) i += len(parsed) return decoded, i else: self._fail("Unexpected content", text, i)
python
def _parse(self, text, i): m = self.start_dict_re.match(text, i) if m: parsed = m.group(0) i += len(parsed) return self._parse_dict(text, i) m = self.start_list_re.match(text, i) if m: parsed = m.group(0) i += len(parsed) return self._parse_list(text, i) m = self.value_re.match(text, i) if m: parsed = m.group(0) i += len(parsed) if hasattr(self.current_type, "read"): reader = self.current_type() # Give the escaped value to `read` to be symmetrical with # `plistValue` which handles the escaping itself. value = reader.read(m.group(1)) return value, i value = self._trim_value(m.group(1)) if self.current_type in (None, dict, OrderedDict): self.current_type = self._guess_current_type(parsed, value) if self.current_type == bool: value = bool(int(value)) # bool(u'0') returns True return value, i value = self.current_type(value) return value, i m = self.hex_re.match(text, i) if m: from glyphsLib.types import BinaryData parsed, value = m.group(0), m.group(1) decoded = BinaryData.fromHex(value) i += len(parsed) return decoded, i else: self._fail("Unexpected content", text, i)
[ "def", "_parse", "(", "self", ",", "text", ",", "i", ")", ":", "m", "=", "self", ".", "start_dict_re", ".", "match", "(", "text", ",", "i", ")", "if", "m", ":", "parsed", "=", "m", ".", "group", "(", "0", ")", "i", "+=", "len", "(", "parsed",...
Recursive function to parse a single dictionary, list, or value.
[ "Recursive", "function", "to", "parse", "a", "single", "dictionary", "list", "or", "value", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/parser.py#L91-L139
13,578
googlefonts/glyphsLib
Lib/glyphsLib/parser.py
Parser._parse_dict
def _parse_dict(self, text, i): """Parse a dictionary from source text starting at i.""" old_current_type = self.current_type new_type = self.current_type if new_type is None: # customparameter.value needs to be set from the found value new_type = dict elif type(new_type) == list: new_type = new_type[0] res = new_type() i = self._parse_dict_into_object(res, text, i) self.current_type = old_current_type return res, i
python
def _parse_dict(self, text, i): old_current_type = self.current_type new_type = self.current_type if new_type is None: # customparameter.value needs to be set from the found value new_type = dict elif type(new_type) == list: new_type = new_type[0] res = new_type() i = self._parse_dict_into_object(res, text, i) self.current_type = old_current_type return res, i
[ "def", "_parse_dict", "(", "self", ",", "text", ",", "i", ")", ":", "old_current_type", "=", "self", ".", "current_type", "new_type", "=", "self", ".", "current_type", "if", "new_type", "is", "None", ":", "# customparameter.value needs to be set from the found value...
Parse a dictionary from source text starting at i.
[ "Parse", "a", "dictionary", "from", "source", "text", "starting", "at", "i", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/parser.py#L141-L153
13,579
googlefonts/glyphsLib
Lib/glyphsLib/parser.py
Parser._parse_list
def _parse_list(self, text, i): """Parse a list from source text starting at i.""" res = [] end_match = self.end_list_re.match(text, i) old_current_type = self.current_type while not end_match: list_item, i = self._parse(text, i) res.append(list_item) end_match = self.end_list_re.match(text, i) if not end_match: m = self.list_delim_re.match(text, i) if not m: self._fail("Missing delimiter in list before content", text, i) parsed = m.group(0) i += len(parsed) self.current_type = old_current_type parsed = end_match.group(0) i += len(parsed) return res, i
python
def _parse_list(self, text, i): res = [] end_match = self.end_list_re.match(text, i) old_current_type = self.current_type while not end_match: list_item, i = self._parse(text, i) res.append(list_item) end_match = self.end_list_re.match(text, i) if not end_match: m = self.list_delim_re.match(text, i) if not m: self._fail("Missing delimiter in list before content", text, i) parsed = m.group(0) i += len(parsed) self.current_type = old_current_type parsed = end_match.group(0) i += len(parsed) return res, i
[ "def", "_parse_list", "(", "self", ",", "text", ",", "i", ")", ":", "res", "=", "[", "]", "end_match", "=", "self", ".", "end_list_re", ".", "match", "(", "text", ",", "i", ")", "old_current_type", "=", "self", ".", "current_type", "while", "not", "e...
Parse a list from source text starting at i.
[ "Parse", "a", "list", "from", "source", "text", "starting", "at", "i", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/parser.py#L187-L209
13,580
googlefonts/glyphsLib
Lib/glyphsLib/parser.py
Parser._trim_value
def _trim_value(self, value): """Trim double quotes off the ends of a value, un-escaping inner double quotes and literal backslashes. Also convert escapes to unicode. If the string is not quoted, return it unmodified. """ if value[0] == '"': assert value[-1] == '"' value = value[1:-1].replace('\\"', '"').replace("\\\\", "\\") return Parser._unescape_re.sub(Parser._unescape_fn, value) return value
python
def _trim_value(self, value): if value[0] == '"': assert value[-1] == '"' value = value[1:-1].replace('\\"', '"').replace("\\\\", "\\") return Parser._unescape_re.sub(Parser._unescape_fn, value) return value
[ "def", "_trim_value", "(", "self", ",", "value", ")", ":", "if", "value", "[", "0", "]", "==", "'\"'", ":", "assert", "value", "[", "-", "1", "]", "==", "'\"'", "value", "=", "value", "[", "1", ":", "-", "1", "]", ".", "replace", "(", "'\\\\\"'...
Trim double quotes off the ends of a value, un-escaping inner double quotes and literal backslashes. Also convert escapes to unicode. If the string is not quoted, return it unmodified.
[ "Trim", "double", "quotes", "off", "the", "ends", "of", "a", "value", "un", "-", "escaping", "inner", "double", "quotes", "and", "literal", "backslashes", ".", "Also", "convert", "escapes", "to", "unicode", ".", "If", "the", "string", "is", "not", "quoted"...
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/parser.py#L221-L231
13,581
googlefonts/glyphsLib
Lib/glyphsLib/parser.py
Parser._fail
def _fail(self, message, text, i): """Raise an exception with given message and text at i.""" raise ValueError("{}:\n{}".format(message, text[i : i + 79]))
python
def _fail(self, message, text, i): raise ValueError("{}:\n{}".format(message, text[i : i + 79]))
[ "def", "_fail", "(", "self", ",", "message", ",", "text", ",", "i", ")", ":", "raise", "ValueError", "(", "\"{}:\\n{}\"", ".", "format", "(", "message", ",", "text", "[", "i", ":", "i", "+", "79", "]", ")", ")" ]
Raise an exception with given message and text at i.
[ "Raise", "an", "exception", "with", "given", "message", "and", "text", "at", "i", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/parser.py#L233-L236
13,582
googlefonts/glyphsLib
Lib/glyphsLib/builder/names.py
build_stylemap_names
def build_stylemap_names( family_name, style_name, is_bold=False, is_italic=False, linked_style=None ): """Build UFO `styleMapFamilyName` and `styleMapStyleName` based on the family and style names, and the entries in the "Style Linking" section of the "Instances" tab in the "Font Info". The value of `styleMapStyleName` can be either "regular", "bold", "italic" or "bold italic", depending on the values of `is_bold` and `is_italic`. The `styleMapFamilyName` is a combination of the `family_name` and the `linked_style`. If `linked_style` is unset or set to 'Regular', the linked style is equal to the style_name with the last occurrences of the strings 'Regular', 'Bold' and 'Italic' stripped from it. """ styleMapStyleName = ( " ".join( s for s in ("bold" if is_bold else "", "italic" if is_italic else "") if s ) or "regular" ) if not linked_style or linked_style == "Regular": linked_style = _get_linked_style(style_name, is_bold, is_italic) if linked_style: styleMapFamilyName = (family_name or "") + " " + linked_style else: styleMapFamilyName = family_name return styleMapFamilyName, styleMapStyleName
python
def build_stylemap_names( family_name, style_name, is_bold=False, is_italic=False, linked_style=None ): styleMapStyleName = ( " ".join( s for s in ("bold" if is_bold else "", "italic" if is_italic else "") if s ) or "regular" ) if not linked_style or linked_style == "Regular": linked_style = _get_linked_style(style_name, is_bold, is_italic) if linked_style: styleMapFamilyName = (family_name or "") + " " + linked_style else: styleMapFamilyName = family_name return styleMapFamilyName, styleMapStyleName
[ "def", "build_stylemap_names", "(", "family_name", ",", "style_name", ",", "is_bold", "=", "False", ",", "is_italic", "=", "False", ",", "linked_style", "=", "None", ")", ":", "styleMapStyleName", "=", "(", "\" \"", ".", "join", "(", "s", "for", "s", "in",...
Build UFO `styleMapFamilyName` and `styleMapStyleName` based on the family and style names, and the entries in the "Style Linking" section of the "Instances" tab in the "Font Info". The value of `styleMapStyleName` can be either "regular", "bold", "italic" or "bold italic", depending on the values of `is_bold` and `is_italic`. The `styleMapFamilyName` is a combination of the `family_name` and the `linked_style`. If `linked_style` is unset or set to 'Regular', the linked style is equal to the style_name with the last occurrences of the strings 'Regular', 'Bold' and 'Italic' stripped from it.
[ "Build", "UFO", "styleMapFamilyName", "and", "styleMapStyleName", "based", "on", "the", "family", "and", "style", "names", "and", "the", "entries", "in", "the", "Style", "Linking", "section", "of", "the", "Instances", "tab", "in", "the", "Font", "Info", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/names.py#L55-L85
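Worked examples for build_stylemap_names; hedged, since _get_linked_style is not shown in the record, but per the docstring it strips trailing 'Regular'/'Bold'/'Italic' from the style name:

from glyphsLib.builder.names import build_stylemap_names

# Style linking within a plain four-style family:
build_stylemap_names("Noto Sans", "Bold Italic", is_bold=True, is_italic=True)
# -> ("Noto Sans", "bold italic")

# A linked subfamily: "Bold" is stripped, "Condensed" moves into the family name:
build_stylemap_names("Noto Sans", "Condensed Bold", is_bold=True)
# -> ("Noto Sans Condensed", "bold")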
13,583
googlefonts/glyphsLib
Lib/glyphsLib/builder/blue_values.py
to_ufo_blue_values
def to_ufo_blue_values(self, ufo, master): """Set postscript blue values from Glyphs alignment zones.""" alignment_zones = master.alignmentZones blue_values = [] other_blues = [] for zone in sorted(alignment_zones): pos = zone.position size = zone.size val_list = blue_values if pos == 0 or size >= 0 else other_blues val_list.extend(sorted((pos, pos + size))) ufo.info.postscriptBlueValues = blue_values ufo.info.postscriptOtherBlues = other_blues
python
def to_ufo_blue_values(self, ufo, master): alignment_zones = master.alignmentZones blue_values = [] other_blues = [] for zone in sorted(alignment_zones): pos = zone.position size = zone.size val_list = blue_values if pos == 0 or size >= 0 else other_blues val_list.extend(sorted((pos, pos + size))) ufo.info.postscriptBlueValues = blue_values ufo.info.postscriptOtherBlues = other_blues
[ "def", "to_ufo_blue_values", "(", "self", ",", "ufo", ",", "master", ")", ":", "alignment_zones", "=", "master", ".", "alignmentZones", "blue_values", "=", "[", "]", "other_blues", "=", "[", "]", "for", "zone", "in", "sorted", "(", "alignment_zones", ")", ...
Set postscript blue values from Glyphs alignment zones.
[ "Set", "postscript", "blue", "values", "from", "Glyphs", "alignment", "zones", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/blue_values.py#L18-L31
13,584
googlefonts/glyphsLib
Lib/glyphsLib/builder/blue_values.py
to_glyphs_blue_values
def to_glyphs_blue_values(self, ufo, master): """Sets the GSFontMaster alignmentZones from the postscript blue values.""" zones = [] blue_values = _pairs(ufo.info.postscriptBlueValues) other_blues = _pairs(ufo.info.postscriptOtherBlues) for y1, y2 in blue_values: size = y2 - y1 if y2 == 0: pos = 0 size = -size else: pos = y1 zones.append(self.glyphs_module.GSAlignmentZone(pos, size)) for y1, y2 in other_blues: size = y1 - y2 pos = y2 zones.append(self.glyphs_module.GSAlignmentZone(pos, size)) master.alignmentZones = sorted(zones, key=lambda zone: -zone.position)
python
def to_glyphs_blue_values(self, ufo, master): zones = [] blue_values = _pairs(ufo.info.postscriptBlueValues) other_blues = _pairs(ufo.info.postscriptOtherBlues) for y1, y2 in blue_values: size = y2 - y1 if y2 == 0: pos = 0 size = -size else: pos = y1 zones.append(self.glyphs_module.GSAlignmentZone(pos, size)) for y1, y2 in other_blues: size = y1 - y2 pos = y2 zones.append(self.glyphs_module.GSAlignmentZone(pos, size)) master.alignmentZones = sorted(zones, key=lambda zone: -zone.position)
[ "def", "to_glyphs_blue_values", "(", "self", ",", "ufo", ",", "master", ")", ":", "zones", "=", "[", "]", "blue_values", "=", "_pairs", "(", "ufo", ".", "info", ".", "postscriptBlueValues", ")", "other_blues", "=", "_pairs", "(", "ufo", ".", "info", ".",...
Sets the GSFontMaster alignmentZones from the postscript blue values.
[ "Sets", "the", "GSFontMaster", "alignmentZones", "from", "the", "postscript", "blue", "values", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/blue_values.py#L34-L53
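A small worked example of the zone-to-blue-value mapping in the two records above (illustrative numbers; a Glyphs alignment zone is a (position, size) pair, while the UFO stores flat lists of y-coordinate pairs):

# x-height zone,  position=500,  size=16  -> postscriptBlueValues gains [500, 516]
# baseline zone,  position=0,    size=-16 -> postscriptBlueValues gains [-16, 0]
#   (position 0 always counts as a blue value, even with negative size)
# descender zone, position=-200, size=-16 -> postscriptOtherBlues gains [-216, -200]
# to_glyphs_blue_values inverts this, pairing the lists back into GSAlignmentZones.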
13,585
googlefonts/glyphsLib
Lib/glyphsLib/builder/filters.py
parse_glyphs_filter
def parse_glyphs_filter(filter_str, is_pre=False): """Parses glyphs custom filter string into a dict object that ufo2ft can consume. Reference: ufo2ft: https://github.com/googlei18n/ufo2ft Glyphs 2.3 Handbook July 2016, p184 Args: filter_str - a Glyphs app filter string Return: A dictionary containing the structured filter, or None if parsing failed. """ elements = filter_str.split(";") if elements[0] == "": logger.error( "Failed to parse glyphs filter, expecting a filter name: \ %s", filter_str, ) return None result = {"name": elements[0]} for idx, elem in enumerate(elements[1:]): if not elem: # skip empty arguments continue if ":" in elem: # Key value pair key, value = elem.split(":", 1) if key.lower() in ["include", "exclude"]: if idx != len(elements[1:]) - 1: logger.error( "{} can only be present as the last argument in the filter. " "{} is ignored.".format(key, elem) ) continue result[key.lower()] = re.split("[ ,]+", value) else: if "kwargs" not in result: result["kwargs"] = {} result["kwargs"][key] = cast_to_number_or_bool(value) else: if "args" not in result: result["args"] = [] result["args"].append(cast_to_number_or_bool(elem)) if is_pre: result["pre"] = True return result
python
def parse_glyphs_filter(filter_str, is_pre=False): elements = filter_str.split(";") if elements[0] == "": logger.error( "Failed to parse glyphs filter, expecting a filter name: \ %s", filter_str, ) return None result = {"name": elements[0]} for idx, elem in enumerate(elements[1:]): if not elem: # skip empty arguments continue if ":" in elem: # Key value pair key, value = elem.split(":", 1) if key.lower() in ["include", "exclude"]: if idx != len(elements[1:]) - 1: logger.error( "{} can only be present as the last argument in the filter. " "{} is ignored.".format(key, elem) ) continue result[key.lower()] = re.split("[ ,]+", value) else: if "kwargs" not in result: result["kwargs"] = {} result["kwargs"][key] = cast_to_number_or_bool(value) else: if "args" not in result: result["args"] = [] result["args"].append(cast_to_number_or_bool(elem)) if is_pre: result["pre"] = True return result
[ "def", "parse_glyphs_filter", "(", "filter_str", ",", "is_pre", "=", "False", ")", ":", "elements", "=", "filter_str", ".", "split", "(", "\";\"", ")", "if", "elements", "[", "0", "]", "==", "\"\"", ":", "logger", ".", "error", "(", "\"Failed to parse glyp...
Parses glyphs custom filter string into a dict object that ufo2ft can consume. Reference: ufo2ft: https://github.com/googlei18n/ufo2ft Glyphs 2.3 Handbook July 2016, p184 Args: filter_str - a Glyphs app filter string Return: A dictionary containing the structured filter, or None if parsing failed.
[ "Parses", "glyphs", "custom", "filter", "string", "into", "a", "dict", "object", "that", "ufo2ft", "can", "consume", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/filters.py#L25-L76
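A worked example for parse_glyphs_filter, using a filter string in the style of the Glyphs handbook the record cites:

from glyphsLib.builder.filters import parse_glyphs_filter

parse_glyphs_filter("Transformations;OffsetX:40;OffsetY:60;include:uni0334,uni0335")
# -> {'name': 'Transformations',
#     'kwargs': {'OffsetX': 40, 'OffsetY': 60},
#     'include': ['uni0334', 'uni0335']}

parse_glyphs_filter("RemoveOverlap", is_pre=True)
# -> {'name': 'RemoveOverlap', 'pre': True}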
13,586
googlefonts/glyphsLib
Lib/glyphsLib/util.py
build_ufo_path
def build_ufo_path(out_dir, family_name, style_name): """Build string to use as a UFO path.""" return os.path.join( out_dir, "%s-%s.ufo" % ((family_name or "").replace(" ", ""), (style_name or "").replace(" ", "")), )
python
def build_ufo_path(out_dir, family_name, style_name): return os.path.join( out_dir, "%s-%s.ufo" % ((family_name or "").replace(" ", ""), (style_name or "").replace(" ", "")), )
[ "def", "build_ufo_path", "(", "out_dir", ",", "family_name", ",", "style_name", ")", ":", "return", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s-%s.ufo\"", "%", "(", "(", "family_name", "or", "\"\"", ")", ".", "replace", "(", "\" \"", ",", ...
Build string to use as a UFO path.
[ "Build", "string", "to", "use", "as", "a", "UFO", "path", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/util.py#L26-L33
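Worked examples for build_ufo_path (spaces are removed from both names, None falls back to an empty string; POSIX paths shown):

from glyphsLib.util import build_ufo_path

build_ufo_path("out", "My Family", "Bold Italic")  # -> 'out/MyFamily-BoldItalic.ufo'
build_ufo_path("out", None, "Regular")             # -> 'out/-Regular.ufo'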
13,587
googlefonts/glyphsLib
Lib/glyphsLib/util.py
write_ufo
def write_ufo(ufo, out_dir): """Write a UFO.""" out_path = build_ufo_path(out_dir, ufo.info.familyName, ufo.info.styleName) logger.info("Writing %s" % out_path) clean_ufo(out_path) ufo.save(out_path)
python
def write_ufo(ufo, out_dir): out_path = build_ufo_path(out_dir, ufo.info.familyName, ufo.info.styleName) logger.info("Writing %s" % out_path) clean_ufo(out_path) ufo.save(out_path)
[ "def", "write_ufo", "(", "ufo", ",", "out_dir", ")", ":", "out_path", "=", "build_ufo_path", "(", "out_dir", ",", "ufo", ".", "info", ".", "familyName", ",", "ufo", ".", "info", ".", "styleName", ")", "logger", ".", "info", "(", "\"Writing %s\"", "%", ...
Write a UFO.
[ "Write", "a", "UFO", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/util.py#L36-L43
13,588
googlefonts/glyphsLib
Lib/glyphsLib/util.py
clean_ufo
def clean_ufo(path): """Make sure old UFO data is removed, as it may contain deleted glyphs.""" if path.endswith(".ufo") and os.path.exists(path): shutil.rmtree(path)
python
def clean_ufo(path): if path.endswith(".ufo") and os.path.exists(path): shutil.rmtree(path)
[ "def", "clean_ufo", "(", "path", ")", ":", "if", "path", ".", "endswith", "(", "\".ufo\"", ")", "and", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "shutil", ".", "rmtree", "(", "path", ")" ]
Make sure old UFO data is removed, as it may contain deleted glyphs.
[ "Make", "sure", "old", "UFO", "data", "is", "removed", "as", "it", "may", "contain", "deleted", "glyphs", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/util.py#L46-L50
13,589
googlefonts/glyphsLib
Lib/glyphsLib/util.py
ufo_create_background_layer_for_all_glyphs
def ufo_create_background_layer_for_all_glyphs(ufo_font): # type: (defcon.Font) -> None """Create a background layer for all glyphs in ufo_font if not present to reduce roundtrip differences.""" if "public.background" in ufo_font.layers: background = ufo_font.layers["public.background"] else: background = ufo_font.newLayer("public.background") for glyph in ufo_font: if glyph.name not in background: background.newGlyph(glyph.name)
python
def ufo_create_background_layer_for_all_glyphs(ufo_font): # type: (defcon.Font) -> None if "public.background" in ufo_font.layers: background = ufo_font.layers["public.background"] else: background = ufo_font.newLayer("public.background") for glyph in ufo_font: if glyph.name not in background: background.newGlyph(glyph.name)
[ "def", "ufo_create_background_layer_for_all_glyphs", "(", "ufo_font", ")", ":", "# type: (defcon.Font) -> None", "if", "\"public.background\"", "in", "ufo_font", ".", "layers", ":", "background", "=", "ufo_font", ".", "layers", "[", "\"public.background\"", "]", "else", ...
Create a background layer for all glyphs in ufo_font if not present to reduce roundtrip differences.
[ "Create", "a", "background", "layer", "for", "all", "glyphs", "in", "ufo_font", "if", "not", "present", "to", "reduce", "roundtrip", "differences", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/util.py#L53-L65
13,590
googlefonts/glyphsLib
Lib/glyphsLib/util.py
cast_to_number_or_bool
def cast_to_number_or_bool(inputstr): """Cast a string to int, float or bool. Return original string if it can't be converted. Scientific notation is converted to float. """ if inputstr.strip().lower() == "true": return True elif inputstr.strip().lower() == "false": return False try: return int(inputstr) except ValueError: try: return float(inputstr) except ValueError: return inputstr
python
def cast_to_number_or_bool(inputstr): if inputstr.strip().lower() == "true": return True elif inputstr.strip().lower() == "false": return False try: return int(inputstr) except ValueError: try: return float(inputstr) except ValueError: return inputstr
[ "def", "cast_to_number_or_bool", "(", "inputstr", ")", ":", "if", "inputstr", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "\"true\"", ":", "return", "True", "elif", "inputstr", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "\"false\""...
Cast a string to int, float or bool. Return original string if it can't be converted. Scientific notation is converted to float.
[ "Cast", "a", "string", "to", "int", "float", "or", "bool", ".", "Return", "original", "string", "if", "it", "can", "t", "be", "converted", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/util.py#L68-L84
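Worked examples for cast_to_number_or_bool, traced from the record's logic:

from glyphsLib.util import cast_to_number_or_bool

cast_to_number_or_bool(" True ")  # -> True (whitespace and case are ignored)
cast_to_number_or_bool("42")      # -> 42
cast_to_number_or_bool("1e-3")    # -> 0.001 (scientific notation becomes float)
cast_to_number_or_bool("wght")    # -> 'wght' (unconvertible strings pass through)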
13,591
googlefonts/glyphsLib
Lib/glyphsLib/builder/background_image.py
to_ufo_background_image
def to_ufo_background_image(self, ufo_glyph, layer): """Copy the background image from the GSLayer to the UFO Glyph.""" image = layer.backgroundImage if image is None: return ufo_image = ufo_glyph.image ufo_image.fileName = image.path ufo_image.transformation = image.transform ufo_glyph.lib[CROP_KEY] = list(image.crop) ufo_glyph.lib[LOCKED_KEY] = image.locked ufo_glyph.lib[ALPHA_KEY] = image.alpha
python
def to_ufo_background_image(self, ufo_glyph, layer): image = layer.backgroundImage if image is None: return ufo_image = ufo_glyph.image ufo_image.fileName = image.path ufo_image.transformation = image.transform ufo_glyph.lib[CROP_KEY] = list(image.crop) ufo_glyph.lib[LOCKED_KEY] = image.locked ufo_glyph.lib[ALPHA_KEY] = image.alpha
[ "def", "to_ufo_background_image", "(", "self", ",", "ufo_glyph", ",", "layer", ")", ":", "image", "=", "layer", ".", "backgroundImage", "if", "image", "is", "None", ":", "return", "ufo_image", "=", "ufo_glyph", ".", "image", "ufo_image", ".", "fileName", "="...
Copy the background image from the GSLayer to the UFO Glyph.
[ "Copy", "the", "backgound", "image", "from", "the", "GSLayer", "to", "the", "UFO", "Glyph", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/background_image.py#L26-L36
13,592
googlefonts/glyphsLib
Lib/glyphsLib/builder/background_image.py
to_glyphs_background_image
def to_glyphs_background_image(self, ufo_glyph, layer): """Copy the background image from the UFO Glyph to the GSLayer.""" ufo_image = ufo_glyph.image if ufo_image.fileName is None: return image = self.glyphs_module.GSBackgroundImage() image.path = ufo_image.fileName image.transform = Transform(*ufo_image.transformation) if CROP_KEY in ufo_glyph.lib: x, y, w, h = ufo_glyph.lib[CROP_KEY] image.crop = Rect(Point(x, y), Size(w, h)) if LOCKED_KEY in ufo_glyph.lib: image.locked = ufo_glyph.lib[LOCKED_KEY] if ALPHA_KEY in ufo_glyph.lib: image.alpha = ufo_glyph.lib[ALPHA_KEY] layer.backgroundImage = image
python
def to_glyphs_background_image(self, ufo_glyph, layer): ufo_image = ufo_glyph.image if ufo_image.fileName is None: return image = self.glyphs_module.GSBackgroundImage() image.path = ufo_image.fileName image.transform = Transform(*ufo_image.transformation) if CROP_KEY in ufo_glyph.lib: x, y, w, h = ufo_glyph.lib[CROP_KEY] image.crop = Rect(Point(x, y), Size(w, h)) if LOCKED_KEY in ufo_glyph.lib: image.locked = ufo_glyph.lib[LOCKED_KEY] if ALPHA_KEY in ufo_glyph.lib: image.alpha = ufo_glyph.lib[ALPHA_KEY] layer.backgroundImage = image
[ "def", "to_glyphs_background_image", "(", "self", ",", "ufo_glyph", ",", "layer", ")", ":", "ufo_image", "=", "ufo_glyph", ".", "image", "if", "ufo_image", ".", "fileName", "is", "None", ":", "return", "image", "=", "self", ".", "glyphs_module", ".", "GSBack...
Copy the background image from the UFO Glyph to the GSLayer.
[ "Copy", "the", "background", "image", "from", "the", "UFO", "Glyph", "to", "the", "GSLayer", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/background_image.py#L39-L54
13,593
4Catalyzer/flask-resty
flask_resty/api.py
Api.add_resource
def add_resource( self, base_rule, base_view, alternate_view=None, alternate_rule=None, id_rule=None, app=None, ): """Add route or routes for a resource. :param str base_rule: The URL rule for the resource. This will be prefixed by the API prefix. :param base_view: Class-based view for the resource. :param alternate_view: If specified, an alternate class-based view for the resource. Usually, this will be a detail view, when the base view is a list view. :param alternate_rule: If specified, the URL rule for the alternate view. This will be prefixed by the API prefix. This is mutually exclusive with id_rule, and must not be specified if alternate_view is not specified. :type alternate_rule: str or None :param id_rule: If specified, a suffix to append to base_rule to get the alternate view URL rule. If alternate_view is specified, and alternate_rule is not, then this defaults to '<id>'. This is mutually exclusive with alternate_rule, and must not be specified if alternate_view is not specified. :type id_rule: str or None :param app: If specified, the application to which to add the route(s). Otherwise, this will be the bound application, if present. """ if alternate_view: if not alternate_rule: id_rule = id_rule or DEFAULT_ID_RULE alternate_rule = posixpath.join(base_rule, id_rule) else: assert id_rule is None else: assert alternate_rule is None assert id_rule is None app = self._get_app(app) endpoint = self._get_endpoint(base_view, alternate_view) # Store the view rules for reference. Doesn't support multiple routes # mapped to same view. views = app.extensions['resty'].views base_rule_full = '{}{}'.format(self.prefix, base_rule) base_view_func = base_view.as_view(endpoint) if not alternate_view: app.add_url_rule(base_rule_full, view_func=base_view_func) views[base_view] = Resource(base_view, base_rule_full) return alternate_rule_full = '{}{}'.format(self.prefix, alternate_rule) alternate_view_func = alternate_view.as_view(endpoint) @functools.wraps(base_view_func) def view_func(*args, **kwargs): if flask.request.url_rule.rule == base_rule_full: return base_view_func(*args, **kwargs) else: return alternate_view_func(*args, **kwargs) app.add_url_rule( base_rule_full, view_func=view_func, endpoint=endpoint, methods=base_view.methods, ) app.add_url_rule( alternate_rule_full, view_func=view_func, endpoint=endpoint, methods=alternate_view.methods, ) views[base_view] = Resource(base_view, base_rule_full) views[alternate_view] = Resource(alternate_view, alternate_rule_full)
python
def add_resource( self, base_rule, base_view, alternate_view=None, alternate_rule=None, id_rule=None, app=None, ): if alternate_view: if not alternate_rule: id_rule = id_rule or DEFAULT_ID_RULE alternate_rule = posixpath.join(base_rule, id_rule) else: assert id_rule is None else: assert alternate_rule is None assert id_rule is None app = self._get_app(app) endpoint = self._get_endpoint(base_view, alternate_view) # Store the view rules for reference. Doesn't support multiple routes # mapped to same view. views = app.extensions['resty'].views base_rule_full = '{}{}'.format(self.prefix, base_rule) base_view_func = base_view.as_view(endpoint) if not alternate_view: app.add_url_rule(base_rule_full, view_func=base_view_func) views[base_view] = Resource(base_view, base_rule_full) return alternate_rule_full = '{}{}'.format(self.prefix, alternate_rule) alternate_view_func = alternate_view.as_view(endpoint) @functools.wraps(base_view_func) def view_func(*args, **kwargs): if flask.request.url_rule.rule == base_rule_full: return base_view_func(*args, **kwargs) else: return alternate_view_func(*args, **kwargs) app.add_url_rule( base_rule_full, view_func=view_func, endpoint=endpoint, methods=base_view.methods, ) app.add_url_rule( alternate_rule_full, view_func=view_func, endpoint=endpoint, methods=alternate_view.methods, ) views[base_view] = Resource(base_view, base_rule_full) views[alternate_view] = Resource(alternate_view, alternate_rule_full)
[ "def", "add_resource", "(", "self", ",", "base_rule", ",", "base_view", ",", "alternate_view", "=", "None", ",", "alternate_rule", "=", "None", ",", "id_rule", "=", "None", ",", "app", "=", "None", ",", ")", ":", "if", "alternate_view", ":", "if", "not",...
Add route or routes for a resource. :param str base_rule: The URL rule for the resource. This will be prefixed by the API prefix. :param base_view: Class-based view for the resource. :param alternate_view: If specified, an alternate class-based view for the resource. Usually, this will be a detail view, when the base view is a list view. :param alternate_rule: If specified, the URL rule for the alternate view. This will be prefixed by the API prefix. This is mutually exclusive with id_rule, and must not be specified if alternate_view is not specified. :type alternate_rule: str or None :param id_rule: If specified, a suffix to append to base_rule to get the alternate view URL rule. If alternate_view is specified, and alternate_rule is not, then this defaults to '<id>'. This is mutually exclusive with alternate_rule, and must not be specified if alternate_view is not specified. :type id_rule: str or None :param app: If specified, the application to which to add the route(s). Otherwise, this will be the bound application, if present.
[ "Add", "route", "or", "routes", "for", "a", "resource", "." ]
a8b6502a799c270ca9ce41c6d8b7297713942097
https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/api.py#L61-L137
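A hedged usage sketch of the routing behavior documented above, in the style of flask-resty's README; WidgetListView and WidgetView are hypothetical ApiView subclasses defined elsewhere:

from flask import Flask
from flask_resty import Api

app = Flask(__name__)
api = Api(app, prefix='/api')

# GET/POST /api/widgets       -> WidgetListView
# GET etc. /api/widgets/<id>  -> WidgetView (via the default id_rule '<id>')
api.add_resource('/widgets', WidgetListView, WidgetView)

# Health check outside the API prefix (see the add_ping record below):
api.add_ping('/ping')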
13,594
4Catalyzer/flask-resty
flask_resty/api.py
Api.add_ping
def add_ping(self, rule, status_code=200, app=None): """Add a ping route. :param str rule: The URL rule. This will not use the API prefix, as the ping endpoint is not really part of the API. :param int status_code: The ping response status code. The default is 200 rather than the more correct 204 because many health checks look for 200s. :param app: If specified, the application to which to add the route. Otherwise, this will be the bound application, if present. """ app = self._get_app(app) @app.route(rule) def ping(): return '', status_code
python
def add_ping(self, rule, status_code=200, app=None): app = self._get_app(app) @app.route(rule) def ping(): return '', status_code
[ "def", "add_ping", "(", "self", ",", "rule", ",", "status_code", "=", "200", ",", "app", "=", "None", ")", ":", "app", "=", "self", ".", "_get_app", "(", "app", ")", "@", "app", ".", "route", "(", "rule", ")", "def", "ping", "(", ")", ":", "ret...
Add a ping route. :param str rule: The URL rule. This will not use the API prefix, as the ping endpoint is not really part of the API. :param int status_code: The ping response status code. The default is 200 rather than the more correct 204 because many health checks look for 200s. :param app: If specified, the application to which to add the route. Otherwise, this will be the bound application, if present.
[ "Add", "a", "ping", "route", "." ]
a8b6502a799c270ca9ce41c6d8b7297713942097
https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/api.py#L150-L165
13,595
googlefonts/glyphsLib
Lib/glyphsLib/builder/builders.py
UFOBuilder.masters
def masters(self): """Get an iterator over master UFOs that match the given family_name. """ if self._sources: for source in self._sources.values(): yield source.font return # Store set of actually existing master (layer) ids. This helps with # catching dangling layer data that Glyphs may ignore, e.g. when # copying glyphs from other fonts with, naturally, different master # ids. Note: Masters have unique ids according to the Glyphs # documentation and can therefore be stored in a set. master_layer_ids = {m.id for m in self.font.masters} # stores background data from "associated layers" supplementary_layer_data = [] # TODO(jamesgk) maybe create one font at a time to reduce memory usage # TODO: (jany) in the future, return a lazy iterator that builds UFOs # on demand. self.to_ufo_font_attributes(self.family_name) # Generate the main (master) layers first. for glyph in self.font.glyphs: for layer in glyph.layers.values(): if layer.associatedMasterId != layer.layerId: # The layer is not the main layer of a master # Store all layers, even the invalid ones, and just skip # them and print a warning below. supplementary_layer_data.append((glyph, layer)) continue ufo_layer = self.to_ufo_layer(glyph, layer) ufo_glyph = ufo_layer.newGlyph(glyph.name) self.to_ufo_glyph(ufo_glyph, layer, glyph) # And sublayers (brace, bracket, ...) second. for glyph, layer in supplementary_layer_data: if ( layer.layerId not in master_layer_ids and layer.associatedMasterId not in master_layer_ids ): if self.minimize_glyphs_diffs: self.logger.warning( '{}, glyph "{}": Layer "{}" is dangling and will be ' "skipped. Did you copy a glyph from a different font?" " If so, you should clean up any phantom layers not " "associated with an actual master.".format( self.font.familyName, glyph.name, layer.layerId ) ) continue if not layer.name: # Empty layer names are invalid according to the UFO spec. if self.minimize_glyphs_diffs: self.logger.warning( '{}, glyph "{}": Contains layer without a name which ' "will be skipped.".format(self.font.familyName, glyph.name) ) continue # Save processing bracket layers for when designspace() is called, as we # have to extract them to free-standing glyphs. if ( "[" in layer.name and "]" in layer.name and ".background" not in layer.name ): self.bracket_layers.append(layer) else: ufo_layer = self.to_ufo_layer(glyph, layer) ufo_glyph = ufo_layer.newGlyph(glyph.name) self.to_ufo_glyph(ufo_glyph, layer, layer.parent) for source in self._sources.values(): ufo = source.font if self.propagate_anchors: self.to_ufo_propagate_font_anchors(ufo) for layer in ufo.layers: self.to_ufo_layer_lib(layer) # Sanitize skip list and write it to both Designspace- and UFO-level lib keys. # The latter is unnecessary when using e.g. the `ufo2ft.compile*FromDS` # functions, but the data may take a different path. Writing it everywhere can # save on surprises/logic in other software. skip_export_glyphs = self._designspace.lib.get("public.skipExportGlyphs") if skip_export_glyphs is not None: skip_export_glyphs = sorted(set(skip_export_glyphs)) self._designspace.lib["public.skipExportGlyphs"] = skip_export_glyphs for source in self._sources.values(): source.font.lib["public.skipExportGlyphs"] = skip_export_glyphs self.to_ufo_features() # This depends on the glyphOrder key self.to_ufo_groups() self.to_ufo_kerning() for source in self._sources.values(): yield source.font
python
def masters(self): if self._sources: for source in self._sources.values(): yield source.font return # Store set of actually existing master (layer) ids. This helps with # catching dangling layer data that Glyphs may ignore, e.g. when # copying glyphs from other fonts with, naturally, different master # ids. Note: Masters have unique ids according to the Glyphs # documentation and can therefore be stored in a set. master_layer_ids = {m.id for m in self.font.masters} # stores background data from "associated layers" supplementary_layer_data = [] # TODO(jamesgk) maybe create one font at a time to reduce memory usage # TODO: (jany) in the future, return a lazy iterator that builds UFOs # on demand. self.to_ufo_font_attributes(self.family_name) # Generate the main (master) layers first. for glyph in self.font.glyphs: for layer in glyph.layers.values(): if layer.associatedMasterId != layer.layerId: # The layer is not the main layer of a master # Store all layers, even the invalid ones, and just skip # them and print a warning below. supplementary_layer_data.append((glyph, layer)) continue ufo_layer = self.to_ufo_layer(glyph, layer) ufo_glyph = ufo_layer.newGlyph(glyph.name) self.to_ufo_glyph(ufo_glyph, layer, glyph) # And sublayers (brace, bracket, ...) second. for glyph, layer in supplementary_layer_data: if ( layer.layerId not in master_layer_ids and layer.associatedMasterId not in master_layer_ids ): if self.minimize_glyphs_diffs: self.logger.warning( '{}, glyph "{}": Layer "{}" is dangling and will be ' "skipped. Did you copy a glyph from a different font?" " If so, you should clean up any phantom layers not " "associated with an actual master.".format( self.font.familyName, glyph.name, layer.layerId ) ) continue if not layer.name: # Empty layer names are invalid according to the UFO spec. if self.minimize_glyphs_diffs: self.logger.warning( '{}, glyph "{}": Contains layer without a name which ' "will be skipped.".format(self.font.familyName, glyph.name) ) continue # Save processing bracket layers for when designspace() is called, as we # have to extract them to free-standing glyphs. if ( "[" in layer.name and "]" in layer.name and ".background" not in layer.name ): self.bracket_layers.append(layer) else: ufo_layer = self.to_ufo_layer(glyph, layer) ufo_glyph = ufo_layer.newGlyph(glyph.name) self.to_ufo_glyph(ufo_glyph, layer, layer.parent) for source in self._sources.values(): ufo = source.font if self.propagate_anchors: self.to_ufo_propagate_font_anchors(ufo) for layer in ufo.layers: self.to_ufo_layer_lib(layer) # Sanitize skip list and write it to both Designspace- and UFO-level lib keys. # The latter is unnecessary when using e.g. the `ufo2ft.compile*FromDS` # functions, but the data may take a different path. Writing it everywhere can # save on surprises/logic in other software. skip_export_glyphs = self._designspace.lib.get("public.skipExportGlyphs") if skip_export_glyphs is not None: skip_export_glyphs = sorted(set(skip_export_glyphs)) self._designspace.lib["public.skipExportGlyphs"] = skip_export_glyphs for source in self._sources.values(): source.font.lib["public.skipExportGlyphs"] = skip_export_glyphs self.to_ufo_features() # This depends on the glyphOrder key self.to_ufo_groups() self.to_ufo_kerning() for source in self._sources.values(): yield source.font
[ "def", "masters", "(", "self", ")", ":", "if", "self", ".", "_sources", ":", "for", "source", "in", "self", ".", "_sources", ".", "values", "(", ")", ":", "yield", "source", ".", "font", "return", "# Store set of actually existing master (layer) ids. This helps ...
Get an iterator over master UFOs that match the given family_name.
[ "Get", "an", "iterator", "over", "master", "UFOs", "that", "match", "the", "given", "family_name", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/builders.py#L128-L227
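A minimal usage sketch for this iterator, assuming the public glyphsLib.to_ufos helper (which drives UFOBuilder under the hood) and a hypothetical "MyFont.glyphs" source file:

import glyphsLib

font = glyphsLib.GSFont("MyFont.glyphs")  # hypothetical input path
# to_ufos() builds one UFO per Glyphs master, i.e. it consumes the
# masters iterator shown in the record above.
for ufo in glyphsLib.to_ufos(font):
    print(ufo.info.familyName, ufo.info.styleName)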
13,596
googlefonts/glyphsLib
Lib/glyphsLib/builder/builders.py
UFOBuilder.designspace
def designspace(self): """Get a designspace Document instance that links the masters together and holds instance data. """ if self._designspace_is_complete: return self._designspace self._designspace_is_complete = True list(self.masters) # Make sure that the UFOs are built self.to_designspace_axes() self.to_designspace_sources() self.to_designspace_instances() self.to_designspace_family_user_data() if self.bracket_layers: self._apply_bracket_layers() # append base style shared by all masters to designspace file name base_family = self.family_name or "Unnamed" base_style = find_base_style(self.font.masters) if base_style: base_style = "-" + base_style name = (base_family + base_style).replace(" ", "") + ".designspace" self.designspace.filename = name return self._designspace
python
def designspace(self):
    if self._designspace_is_complete:
        return self._designspace

    self._designspace_is_complete = True
    list(self.masters)  # Make sure that the UFOs are built
    self.to_designspace_axes()
    self.to_designspace_sources()
    self.to_designspace_instances()
    self.to_designspace_family_user_data()

    if self.bracket_layers:
        self._apply_bracket_layers()

    # append base style shared by all masters to designspace file name
    base_family = self.family_name or "Unnamed"
    base_style = find_base_style(self.font.masters)
    if base_style:
        base_style = "-" + base_style
    name = (base_family + base_style).replace(" ", "") + ".designspace"
    self.designspace.filename = name

    return self._designspace
[ "def", "designspace", "(", "self", ")", ":", "if", "self", ".", "_designspace_is_complete", ":", "return", "self", ".", "_designspace", "self", ".", "_designspace_is_complete", "=", "True", "list", "(", "self", ".", "masters", ")", "# Make sure that the UFOs are b...
Get a designspace Document instance that links the masters together and holds instance data.
[ "Get", "a", "designspace", "Document", "instance", "that", "links", "the", "masters", "together", "and", "holds", "instance", "data", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/builders.py#L230-L255
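A sketch of obtaining the linked designspace through the public glyphsLib.to_designspace wrapper rather than instantiating the builder directly; the source file name is again hypothetical:

import glyphsLib

font = glyphsLib.GSFont("MyFont.glyphs")  # hypothetical input path
# to_designspace() returns a designspaceLib DesignSpaceDocument whose
# filename follows the base-family/base-style scheme described above.
doc = glyphsLib.to_designspace(font)
print(doc.filename)
for source in doc.sources:
    print(source.styleName, source.location)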
13,597
googlefonts/glyphsLib
Lib/glyphsLib/builder/builders.py
GlyphsBuilder.font
def font(self): """Get the GSFont built from the UFOs + designspace.""" if self._font is not None: return self._font # Sort UFOS in the original order from the Glyphs file sorted_sources = self.to_glyphs_ordered_masters() # Convert all full source UFOs to Glyphs masters. Sources with layer names # are assumed to be sparse or "brace" layers and are ignored because Glyphs # considers them to be special layers and will handle them itself. self._font = self.glyphs_module.GSFont() self._sources = OrderedDict() # Same as in UFOBuilder for index, source in enumerate(s for s in sorted_sources if not s.layerName): master = self.glyphs_module.GSFontMaster() # Filter bracket glyphs out of public.glyphOrder. if GLYPH_ORDER_KEY in source.font.lib: source.font.lib[GLYPH_ORDER_KEY] = [ glyph_name for glyph_name in source.font.lib[GLYPH_ORDER_KEY] if ".BRACKET." not in glyph_name ] self.to_glyphs_font_attributes(source, master, is_initial=(index == 0)) self.to_glyphs_master_attributes(source, master) self._font.masters.insert(len(self._font.masters), master) self._sources[master.id] = source # First, move free-standing bracket glyphs back to layers to avoid dealing # with GSLayer transplantation. for bracket_glyph in [g for g in source.font if ".BRACKET." in g.name]: base_glyph, threshold = bracket_glyph.name.split(".BRACKET.") try: int(threshold) except ValueError: raise ValueError( "Glyph '{}' has malformed bracket layer name. Must be '<glyph " "name>.BRACKET.<crossover value>'.".format(bracket_glyph) ) layer_name = bracket_glyph.lib.get( GLYPHLIB_PREFIX + "_originalLayerName", "[{}]".format(threshold) ) if layer_name not in source.font.layers: ufo_layer = source.font.newLayer(layer_name) else: ufo_layer = source.font.layers[layer_name] bracket_glyph_new = ufo_layer.newGlyph(base_glyph) bracket_glyph_new.copyDataFromGlyph(bracket_glyph) # Remove all freestanding bracket layer glyphs from all layers. for layer in source.font.layers: if bracket_glyph.name in layer: del layer[bracket_glyph.name] for layer in _sorted_backgrounds_last(source.font.layers): self.to_glyphs_layer_lib(layer) for glyph in layer: self.to_glyphs_glyph(glyph, layer, master) self.to_glyphs_features() self.to_glyphs_groups() self.to_glyphs_kerning() # Now that all GSGlyph are built, restore the glyph order if self.designspace.sources: first_ufo = self.designspace.sources[0].font if GLYPH_ORDER_KEY in first_ufo.lib: glyph_order = first_ufo.lib[GLYPH_ORDER_KEY] lookup = {name: i for i, name in enumerate(glyph_order)} self.font.glyphs = sorted( self.font.glyphs, key=lambda glyph: lookup.get(glyph.name, 1 << 63) ) # FIXME: (jany) We only do that on the first one. Maybe we should # merge the various `public.glyphorder` values? # Restore the layer ordering in each glyph for glyph in self._font.glyphs: self.to_glyphs_layer_order(glyph) self.to_glyphs_family_user_data_from_designspace() self.to_glyphs_axes() self.to_glyphs_sources() self.to_glyphs_instances() return self._font
python
def font(self):
    if self._font is not None:
        return self._font

    # Sort UFOS in the original order from the Glyphs file
    sorted_sources = self.to_glyphs_ordered_masters()

    # Convert all full source UFOs to Glyphs masters. Sources with layer names
    # are assumed to be sparse or "brace" layers and are ignored because Glyphs
    # considers them to be special layers and will handle them itself.
    self._font = self.glyphs_module.GSFont()
    self._sources = OrderedDict()  # Same as in UFOBuilder
    for index, source in enumerate(s for s in sorted_sources if not s.layerName):
        master = self.glyphs_module.GSFontMaster()

        # Filter bracket glyphs out of public.glyphOrder.
        if GLYPH_ORDER_KEY in source.font.lib:
            source.font.lib[GLYPH_ORDER_KEY] = [
                glyph_name
                for glyph_name in source.font.lib[GLYPH_ORDER_KEY]
                if ".BRACKET." not in glyph_name
            ]

        self.to_glyphs_font_attributes(source, master, is_initial=(index == 0))
        self.to_glyphs_master_attributes(source, master)
        self._font.masters.insert(len(self._font.masters), master)
        self._sources[master.id] = source

        # First, move free-standing bracket glyphs back to layers to avoid dealing
        # with GSLayer transplantation.
        for bracket_glyph in [g for g in source.font if ".BRACKET." in g.name]:
            base_glyph, threshold = bracket_glyph.name.split(".BRACKET.")
            try:
                int(threshold)
            except ValueError:
                raise ValueError(
                    "Glyph '{}' has malformed bracket layer name. Must be '<glyph "
                    "name>.BRACKET.<crossover value>'.".format(bracket_glyph)
                )
            layer_name = bracket_glyph.lib.get(
                GLYPHLIB_PREFIX + "_originalLayerName", "[{}]".format(threshold)
            )
            if layer_name not in source.font.layers:
                ufo_layer = source.font.newLayer(layer_name)
            else:
                ufo_layer = source.font.layers[layer_name]
            bracket_glyph_new = ufo_layer.newGlyph(base_glyph)
            bracket_glyph_new.copyDataFromGlyph(bracket_glyph)

            # Remove all freestanding bracket layer glyphs from all layers.
            for layer in source.font.layers:
                if bracket_glyph.name in layer:
                    del layer[bracket_glyph.name]

        for layer in _sorted_backgrounds_last(source.font.layers):
            self.to_glyphs_layer_lib(layer)
            for glyph in layer:
                self.to_glyphs_glyph(glyph, layer, master)

    self.to_glyphs_features()
    self.to_glyphs_groups()
    self.to_glyphs_kerning()

    # Now that all GSGlyph are built, restore the glyph order
    if self.designspace.sources:
        first_ufo = self.designspace.sources[0].font
        if GLYPH_ORDER_KEY in first_ufo.lib:
            glyph_order = first_ufo.lib[GLYPH_ORDER_KEY]
            lookup = {name: i for i, name in enumerate(glyph_order)}
            self.font.glyphs = sorted(
                self.font.glyphs, key=lambda glyph: lookup.get(glyph.name, 1 << 63)
            )
        # FIXME: (jany) We only do that on the first one. Maybe we should
        # merge the various `public.glyphorder` values?

    # Restore the layer ordering in each glyph
    for glyph in self._font.glyphs:
        self.to_glyphs_layer_order(glyph)

    self.to_glyphs_family_user_data_from_designspace()
    self.to_glyphs_axes()
    self.to_glyphs_sources()
    self.to_glyphs_instances()

    return self._font
[ "def", "font", "(", "self", ")", ":", "if", "self", ".", "_font", "is", "not", "None", ":", "return", "self", ".", "_font", "# Sort UFOS in the original order from the Glyphs file", "sorted_sources", "=", "self", ".", "to_glyphs_ordered_masters", "(", ")", "# Conv...
Get the GSFont built from the UFOs + designspace.
[ "Get", "the", "GSFont", "built", "from", "the", "UFOs", "+", "designspace", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/builders.py#L512-L597
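A hedged round-trip sketch: to_glyphs() is the public entry point that ends up in this font() build, and it is assumed here to accept the DesignSpaceDocument produced by to_designspace(); the source file name is hypothetical:

import glyphsLib

doc = glyphsLib.to_designspace(glyphsLib.GSFont("MyFont.glyphs"))
# to_glyphs() reassembles a GSFont from the UFO sources, restoring
# glyph order and bracket layers as described in font() above.
gsfont = glyphsLib.to_glyphs(doc)
print(len(gsfont.masters), len(gsfont.glyphs))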
13,598
googlefonts/glyphsLib
Lib/glyphsLib/builder/builders.py
GlyphsBuilder._valid_designspace
def _valid_designspace(self, designspace):
    """Make sure that the user-provided designspace has loaded fonts and
    that names are the same as those from the UFOs.
    """
    # TODO: (jany) really make a copy to avoid modifying the original object
    copy = designspace
    # Load only full UFO masters, sparse or "brace" layer sources are assumed
    # to point to existing layers within one of the full masters.
    for source in (s for s in copy.sources if not s.layerName):
        if not hasattr(source, "font") or source.font is None:
            if source.path:
                # FIXME: (jany) consider not changing the caller's objects
                source.font = defcon.Font(source.path)
            else:
                dirname = os.path.dirname(designspace.path)
                ufo_path = os.path.join(dirname, source.filename)
                source.font = defcon.Font(ufo_path)
        if source.location is None:
            source.location = {}
        for name in ("familyName", "styleName"):
            if getattr(source, name) != getattr(source.font.info, name):
                self.logger.warning(
                    dedent(
                        """\
                    The {name} is different between the UFO and the designspace source:
                        source filename: {filename}
                        source {name}: {source_name}
                        ufo {name}: {ufo_name}

                    The UFO name will be used.
                    """
                    ).format(
                        name=name,
                        filename=source.filename,
                        source_name=getattr(source, name),
                        ufo_name=getattr(source.font.info, name),
                    )
                )
                setattr(source, name, getattr(source.font.info, name))
    return copy
python
def _valid_designspace(self, designspace):
    # TODO: (jany) really make a copy to avoid modifying the original object
    copy = designspace
    # Load only full UFO masters, sparse or "brace" layer sources are assumed
    # to point to existing layers within one of the full masters.
    for source in (s for s in copy.sources if not s.layerName):
        if not hasattr(source, "font") or source.font is None:
            if source.path:
                # FIXME: (jany) consider not changing the caller's objects
                source.font = defcon.Font(source.path)
            else:
                dirname = os.path.dirname(designspace.path)
                ufo_path = os.path.join(dirname, source.filename)
                source.font = defcon.Font(ufo_path)
        if source.location is None:
            source.location = {}
        for name in ("familyName", "styleName"):
            if getattr(source, name) != getattr(source.font.info, name):
                self.logger.warning(
                    dedent(
                        """\
                    The {name} is different between the UFO and the designspace source:
                        source filename: {filename}
                        source {name}: {source_name}
                        ufo {name}: {ufo_name}

                    The UFO name will be used.
                    """
                    ).format(
                        name=name,
                        filename=source.filename,
                        source_name=getattr(source, name),
                        ufo_name=getattr(source.font.info, name),
                    )
                )
                setattr(source, name, getattr(source.font.info, name))
    return copy
[ "def", "_valid_designspace", "(", "self", ",", "designspace", ")", ":", "# TODO: (jany) really make a copy to avoid modifying the original object", "copy", "=", "designspace", "# Load only full UFO masters, sparse or \"brace\" layer sources are assumed", "# to point to existing layers with...
Make sure that the user-provided designspace has loaded fonts and that names are the same as those from the UFOs.
[ "Make", "sure", "that", "the", "user", "-", "provided", "designspace", "has", "loaded", "fonts", "and", "that", "names", "are", "the", "same", "as", "those", "from", "the", "UFOs", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/builders.py#L599-L638
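A standalone sketch of the mismatch this method warns about, built with defcon and fontTools.designspaceLib directly; the family names are invented:

import defcon
from fontTools import designspaceLib

ufo = defcon.Font()
ufo.info.familyName = "Family B"  # what the UFO claims
ufo.info.styleName = "Regular"

doc = designspaceLib.DesignSpaceDocument()
source = doc.newSourceDescriptor()
source.font = ufo
source.familyName = "Family A"  # disagrees with the UFO
source.styleName = "Regular"
doc.addSource(source)
# _valid_designspace(doc) would log a warning for familyName and then
# overwrite source.familyName with "Family B" taken from the UFO.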
13,599
googlefonts/glyphsLib
Lib/glyphsLib/builder/builders.py
GlyphsBuilder._fake_designspace
def _fake_designspace(self, ufos):
    """Build a fake designspace with the given UFOs as sources, so that all
    builder functions can rely on the presence of a designspace.
    """
    designspace = designspaceLib.DesignSpaceDocument()

    ufo_to_location = defaultdict(dict)

    # Make weight and width axis if relevant
    for info_key, axis_def in zip(
        ("openTypeOS2WeightClass", "openTypeOS2WidthClass"),
        (WEIGHT_AXIS_DEF, WIDTH_AXIS_DEF),
    ):
        axis = designspace.newAxisDescriptor()
        axis.tag = axis_def.tag
        axis.name = axis_def.name
        mapping = []
        for ufo in ufos:
            user_loc = getattr(ufo.info, info_key)
            if user_loc is not None:
                design_loc = class_to_value(axis_def.tag, user_loc)
                mapping.append((user_loc, design_loc))
                ufo_to_location[ufo][axis_def.name] = design_loc

        mapping = sorted(set(mapping))
        if len(mapping) > 1:
            axis.map = mapping
            axis.minimum = min([user_loc for user_loc, _ in mapping])
            axis.maximum = max([user_loc for user_loc, _ in mapping])
            axis.default = min(
                axis.maximum, max(axis.minimum, axis_def.default_user_loc)
            )
            designspace.addAxis(axis)

    for ufo in ufos:
        source = designspace.newSourceDescriptor()
        source.font = ufo
        source.familyName = ufo.info.familyName
        source.styleName = ufo.info.styleName
        # source.name = '%s %s' % (source.familyName, source.styleName)
        source.path = ufo.path
        source.location = ufo_to_location[ufo]
        designspace.addSource(source)

    # UFO-level skip list lib keys are usually ignored, except when we don't have a
    # Designspace file to start from. If they exist in the UFOs, promote them to a
    # Designspace-level lib key. However, to avoid accidents, expect the list to
    # exist in none or be the same in all UFOs.
    if any("public.skipExportGlyphs" in ufo.lib for ufo in ufos):
        skip_export_glyphs = {
            frozenset(ufo.lib.get("public.skipExportGlyphs", [])) for ufo in ufos
        }
        if len(skip_export_glyphs) == 1:
            designspace.lib["public.skipExportGlyphs"] = sorted(
                next(iter(skip_export_glyphs))
            )
        else:
            raise ValueError(
                "The `public.skipExportGlyphs` list of all UFOs must either not "
                "exist or be the same in every UFO."
            )

    return designspace
python
def _fake_designspace(self, ufos):
    designspace = designspaceLib.DesignSpaceDocument()

    ufo_to_location = defaultdict(dict)

    # Make weight and width axis if relevant
    for info_key, axis_def in zip(
        ("openTypeOS2WeightClass", "openTypeOS2WidthClass"),
        (WEIGHT_AXIS_DEF, WIDTH_AXIS_DEF),
    ):
        axis = designspace.newAxisDescriptor()
        axis.tag = axis_def.tag
        axis.name = axis_def.name
        mapping = []
        for ufo in ufos:
            user_loc = getattr(ufo.info, info_key)
            if user_loc is not None:
                design_loc = class_to_value(axis_def.tag, user_loc)
                mapping.append((user_loc, design_loc))
                ufo_to_location[ufo][axis_def.name] = design_loc

        mapping = sorted(set(mapping))
        if len(mapping) > 1:
            axis.map = mapping
            axis.minimum = min([user_loc for user_loc, _ in mapping])
            axis.maximum = max([user_loc for user_loc, _ in mapping])
            axis.default = min(
                axis.maximum, max(axis.minimum, axis_def.default_user_loc)
            )
            designspace.addAxis(axis)

    for ufo in ufos:
        source = designspace.newSourceDescriptor()
        source.font = ufo
        source.familyName = ufo.info.familyName
        source.styleName = ufo.info.styleName
        # source.name = '%s %s' % (source.familyName, source.styleName)
        source.path = ufo.path
        source.location = ufo_to_location[ufo]
        designspace.addSource(source)

    # UFO-level skip list lib keys are usually ignored, except when we don't have a
    # Designspace file to start from. If they exist in the UFOs, promote them to a
    # Designspace-level lib key. However, to avoid accidents, expect the list to
    # exist in none or be the same in all UFOs.
    if any("public.skipExportGlyphs" in ufo.lib for ufo in ufos):
        skip_export_glyphs = {
            frozenset(ufo.lib.get("public.skipExportGlyphs", [])) for ufo in ufos
        }
        if len(skip_export_glyphs) == 1:
            designspace.lib["public.skipExportGlyphs"] = sorted(
                next(iter(skip_export_glyphs))
            )
        else:
            raise ValueError(
                "The `public.skipExportGlyphs` list of all UFOs must either not "
                "exist or be the same in every UFO."
            )

    return designspace
[ "def", "_fake_designspace", "(", "self", ",", "ufos", ")", ":", "designspace", "=", "designspaceLib", ".", "DesignSpaceDocument", "(", ")", "ufo_to_location", "=", "defaultdict", "(", "dict", ")", "# Make weight and width axis if relevant", "for", "info_key", ",", "...
Build a fake designspace with the given UFOs as sources, so that all builder functions can rely on the presence of a designspace.
[ "Build", "a", "fake", "designspace", "with", "the", "given", "UFOs", "as", "sources", "so", "that", "all", "builder", "functions", "can", "rely", "on", "the", "presence", "of", "a", "designspace", "." ]
9c12dc70c8d13f08d92b824e6710f6e3bb5037bb
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/builders.py#L640-L702
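The promotion rule at the end of _fake_designspace can be illustrated without any font objects; a small self-contained sketch with invented glyph names:

# UFO-level skip lists are promoted to the designspace only when every
# UFO agrees, compared order-insensitively via frozensets.
libs = [
    {"public.skipExportGlyphs": ["_part.stem", "_part.bowl"]},
    {"public.skipExportGlyphs": ["_part.bowl", "_part.stem"]},
]
unique = {frozenset(lib.get("public.skipExportGlyphs", [])) for lib in libs}
assert len(unique) == 1, "skip lists differ between UFOs"
print(sorted(next(iter(unique))))  # ['_part.bowl', '_part.stem']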