repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
biocore/burrito-fillings
bfillings/seqprep.py
SeqPrep._unassembled_reads2_out_file_name
def _unassembled_reads2_out_file_name(self): """Checks if file name is set for reads2 output. Returns absolute path.""" if self.Parameters['-2'].isOn(): unassembled_reads2 = self._absolute( str(self.Parameters['-2'].Value)) else: raise ValueError("No reads2 (flag -2) output path specified") return unassembled_reads2
python
def _unassembled_reads2_out_file_name(self): """Checks if file name is set for reads2 output. Returns absolute path.""" if self.Parameters['-2'].isOn(): unassembled_reads2 = self._absolute( str(self.Parameters['-2'].Value)) else: raise ValueError("No reads2 (flag -2) output path specified") return unassembled_reads2
[ "def", "_unassembled_reads2_out_file_name", "(", "self", ")", ":", "if", "self", ".", "Parameters", "[", "'-2'", "]", ".", "isOn", "(", ")", ":", "unassembled_reads2", "=", "self", ".", "_absolute", "(", "str", "(", "self", ".", "Parameters", "[", "'-2'", ...
Checks if file name is set for reads2 output. Returns absolute path.
[ "Checks", "if", "file", "name", "is", "set", "for", "reads2", "output", ".", "Returns", "absolute", "path", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L135-L143
biocore/burrito-fillings
bfillings/seqprep.py
SeqPrep._discarded_reads1_out_file_name
def _discarded_reads1_out_file_name(self): """Checks if file name is set for discarded reads1 output. Returns absolute path.""" if self.Parameters['-3'].isOn(): discarded_reads1 = self._absolute(str(self.Parameters['-3'].Value)) else: raise ValueError( "No discarded-reads1 (flag -3) output path specified") return discarded_reads1
python
def _discarded_reads1_out_file_name(self): """Checks if file name is set for discarded reads1 output. Returns absolute path.""" if self.Parameters['-3'].isOn(): discarded_reads1 = self._absolute(str(self.Parameters['-3'].Value)) else: raise ValueError( "No discarded-reads1 (flag -3) output path specified") return discarded_reads1
[ "def", "_discarded_reads1_out_file_name", "(", "self", ")", ":", "if", "self", ".", "Parameters", "[", "'-3'", "]", ".", "isOn", "(", ")", ":", "discarded_reads1", "=", "self", ".", "_absolute", "(", "str", "(", "self", ".", "Parameters", "[", "'-3'", "]...
Checks if file name is set for discarded reads1 output. Returns absolute path.
[ "Checks", "if", "file", "name", "is", "set", "for", "discarded", "reads1", "output", ".", "Returns", "absolute", "path", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L145-L153
biocore/burrito-fillings
bfillings/seqprep.py
SeqPrep._discarded_reads2_out_file_name
def _discarded_reads2_out_file_name(self): """Checks if file name is set for discarded reads2 output. Returns absolute path.""" if self.Parameters['-4'].isOn(): discarded_reads2 = self._absolute(str(self.Parameters['-4'].Value)) else: raise ValueError( "No discarded-reads2 (flag -4) output path specified") return discarded_reads2
python
def _discarded_reads2_out_file_name(self): """Checks if file name is set for discarded reads2 output. Returns absolute path.""" if self.Parameters['-4'].isOn(): discarded_reads2 = self._absolute(str(self.Parameters['-4'].Value)) else: raise ValueError( "No discarded-reads2 (flag -4) output path specified") return discarded_reads2
[ "def", "_discarded_reads2_out_file_name", "(", "self", ")", ":", "if", "self", ".", "Parameters", "[", "'-4'", "]", ".", "isOn", "(", ")", ":", "discarded_reads2", "=", "self", ".", "_absolute", "(", "str", "(", "self", ".", "Parameters", "[", "'-4'", "]...
Checks if file name is set for discarded reads2 output. Returns absolute path.
[ "Checks", "if", "file", "name", "is", "set", "for", "discarded", "reads2", "output", ".", "Returns", "absolute", "path", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L155-L163
biocore/burrito-fillings
bfillings/seqprep.py
SeqPrep._assembled_out_file_name
def _assembled_out_file_name(self): """Checks file name is set for assembled output. Returns absolute path.""" if self.Parameters['-s'].isOn(): assembled_reads = self._absolute(str(self.Parameters['-s'].Value)) else: raise ValueError( "No assembled-reads (flag -s) output path specified") return assembled_reads
python
def _assembled_out_file_name(self): """Checks file name is set for assembled output. Returns absolute path.""" if self.Parameters['-s'].isOn(): assembled_reads = self._absolute(str(self.Parameters['-s'].Value)) else: raise ValueError( "No assembled-reads (flag -s) output path specified") return assembled_reads
[ "def", "_assembled_out_file_name", "(", "self", ")", ":", "if", "self", ".", "Parameters", "[", "'-s'", "]", ".", "isOn", "(", ")", ":", "assembled_reads", "=", "self", ".", "_absolute", "(", "str", "(", "self", ".", "Parameters", "[", "'-s'", "]", "."...
Checks file name is set for assembled output. Returns absolute path.
[ "Checks", "file", "name", "is", "set", "for", "assembled", "output", ".", "Returns", "absolute", "path", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L165-L173
biocore/burrito-fillings
bfillings/seqprep.py
SeqPrep._pretty_alignment_out_file_name
def _pretty_alignment_out_file_name(self): """Checks file name is set for pretty alignment output. Returns absolute path.""" if self.Parameters['-E'].isOn(): pretty_alignment = self._absolute(str(self.Parameters['-E'].Value)) else: raise ValueError( "No pretty-=alignment (flag -E) output path specified") return pretty_alignment
python
def _pretty_alignment_out_file_name(self): """Checks file name is set for pretty alignment output. Returns absolute path.""" if self.Parameters['-E'].isOn(): pretty_alignment = self._absolute(str(self.Parameters['-E'].Value)) else: raise ValueError( "No pretty-=alignment (flag -E) output path specified") return pretty_alignment
[ "def", "_pretty_alignment_out_file_name", "(", "self", ")", ":", "if", "self", ".", "Parameters", "[", "'-E'", "]", ".", "isOn", "(", ")", ":", "pretty_alignment", "=", "self", ".", "_absolute", "(", "str", "(", "self", ".", "Parameters", "[", "'-E'", "]...
Checks file name is set for pretty alignment output. Returns absolute path.
[ "Checks", "file", "name", "is", "set", "for", "pretty", "alignment", "output", ".", "Returns", "absolute", "path", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L175-L183
biocore/burrito-fillings
bfillings/seqprep.py
SeqPrep._get_result_paths
def _get_result_paths(self, data):
    """Map SeqPrep output names to ResultPath objects.

    The unassembled reads1/reads2 files are always produced; the
    assembled (-s), discarded reads1 (-3), discarded reads2 (-4) and
    pretty-alignment (-E) files are registered only when their
    corresponding flag is switched on.
    """
    # Mandatory outputs.
    result = {
        'UnassembledReads1': ResultPath(
            Path=self._unassembled_reads1_out_file_name(), IsWritten=True),
        'UnassembledReads2': ResultPath(
            Path=self._unassembled_reads2_out_file_name(), IsWritten=True),
    }
    # Optional outputs: (flag, result key, path getter) triples.
    optional_outputs = (
        ('-s', 'Assembled', self._assembled_out_file_name),
        ('-3', 'Reads1Discarded', self._discarded_reads1_out_file_name),
        ('-4', 'Reads2Discarded', self._discarded_reads2_out_file_name),
        ('-E', 'PrettyAlignments', self._pretty_alignment_out_file_name),
    )
    for flag, key, path_getter in optional_outputs:
        if self.Parameters[flag].isOn():
            result[key] = ResultPath(Path=path_getter(), IsWritten=True)
    return result
python
def _get_result_paths(self, data): """Captures SeqPrep output. """ result = {} # Always output: result['UnassembledReads1'] = ResultPath(Path= self._unassembled_reads1_out_file_name( ), IsWritten=True) result['UnassembledReads2'] = ResultPath(Path= self._unassembled_reads2_out_file_name( ), IsWritten=True) # optional output, so we check for each # check for assembled reads file if self.Parameters['-s'].isOn(): result['Assembled'] = ResultPath(Path= self._assembled_out_file_name(), IsWritten=True) # check for discarded (unassembled) reads1 file if self.Parameters['-3'].isOn(): result['Reads1Discarded'] = ResultPath(Path= self._discarded_reads1_out_file_name( ), IsWritten=True) # check for discarded (unassembled) reads2 file if self.Parameters['-4'].isOn(): result['Reads2Discarded'] = ResultPath(Path= self._discarded_reads2_out_file_name( ), IsWritten=True) # check for pretty-alignment file if self.Parameters['-E'].isOn(): result['PrettyAlignments'] = ResultPath(Path= self._pretty_alignment_out_file_name( ), IsWritten=True) return result
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "result", "=", "{", "}", "# Always output:", "result", "[", "'UnassembledReads1'", "]", "=", "ResultPath", "(", "Path", "=", "self", ".", "_unassembled_reads1_out_file_name", "(", ")", ",", "IsWri...
Captures SeqPrep output.
[ "Captures", "SeqPrep", "output", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L185-L229
bitlabstudio/django-document-library
document_library/templatetags/document_library_tags.py
get_files_for_document
def get_files_for_document(document):
    """Collect the distinct filer files across a document's translations.

    Each returned file is tagged with the language code of the first
    translation that referenced it; a file already collected for one
    language is not added again for another.
    """
    collected = []
    for translation in document.translations.all():
        filer_file = translation.filer_file
        if filer_file is None or filer_file in collected:
            continue
        filer_file.language = translation.language_code
        collected.append(filer_file)
    return collected
python
def get_files_for_document(document): """ Returns the available files for all languages. In case the file is already present in another language, it does not re-add it again. """ files = [] for doc_trans in document.translations.all(): if doc_trans.filer_file is not None and \ doc_trans.filer_file not in files: doc_trans.filer_file.language = doc_trans.language_code files.append(doc_trans.filer_file) return files
[ "def", "get_files_for_document", "(", "document", ")", ":", "files", "=", "[", "]", "for", "doc_trans", "in", "document", ".", "translations", ".", "all", "(", ")", ":", "if", "doc_trans", ".", "filer_file", "is", "not", "None", "and", "doc_trans", ".", ...
Returns the available files for all languages. In case the file is already present in another language, it does not re-add it again.
[ "Returns", "the", "available", "files", "for", "all", "languages", "." ]
train
https://github.com/bitlabstudio/django-document-library/blob/508737277455f182e81780cfca8d8eceb989a45b/document_library/templatetags/document_library_tags.py#L11-L25
bitlabstudio/django-document-library
document_library/templatetags/document_library_tags.py
get_frontpage_documents
def get_frontpage_documents(context):
    """Return the published documents flagged for the front page."""
    request = context.get('request')
    frontpage_docs = Document.objects.published(request).filter(
        is_on_front_page=True)
    return frontpage_docs
python
def get_frontpage_documents(context): """Returns the library favs that should be shown on the front page.""" req = context.get('request') qs = Document.objects.published(req).filter(is_on_front_page=True) return qs
[ "def", "get_frontpage_documents", "(", "context", ")", ":", "req", "=", "context", ".", "get", "(", "'request'", ")", "qs", "=", "Document", ".", "objects", ".", "published", "(", "req", ")", ".", "filter", "(", "is_on_front_page", "=", "True", ")", "ret...
Returns the library favs that should be shown on the front page.
[ "Returns", "the", "library", "favs", "that", "should", "be", "shown", "on", "the", "front", "page", "." ]
train
https://github.com/bitlabstudio/django-document-library/blob/508737277455f182e81780cfca8d8eceb989a45b/document_library/templatetags/document_library_tags.py#L29-L33
BoGoEngine/bogo-python
bogo/utils.py
append_comps
def append_comps(comps, char):
    """
    Append a character to `comps` following this rule: a vowel is
    added to the vowel part if there is no last consonant, else to the
    last consonant part; a consonant is added to the first consonant
    part if there is no vowel, and to the last consonant part if the
    vowel part is not empty.

    Docstring examples fixed: they previously called `transform` and
    the first one was missing the `char` argument.

    >>> append_comps(['', '', ''], 'c')
    ['c', '', '']
    >>> append_comps(['c', '', ''], 'o')
    ['c', 'o', '']
    >>> append_comps(['c', 'o', ''], 'n')
    ['c', 'o', 'n']
    >>> append_comps(['c', 'o', 'n'], 'o')
    ['c', 'o', 'no']
    """
    # Work on a copy so the caller's list is never mutated.
    c = list(comps)
    if is_vowel(char):
        # Vowels extend the vowel part unless a final consonant exists.
        if not c[2]:
            pos = 1
        else:
            pos = 2
    else:
        # Consonants start the word only while both later parts are empty.
        if not c[2] and not c[1]:
            pos = 0
        else:
            pos = 2
    c[pos] += char
    return c
python
def append_comps(comps, char): """ Append a character to `comps` following this rule: a vowel is added to the vowel part if there is no last consonant, else to the last consonant part; a consonant is added to the first consonant part if there is no vowel, and to the last consonant part if the vowel part is not empty. >>> transform(['', '', '']) ['c', '', ''] >>> transform(['c', '', ''], '+o') ['c', 'o', ''] >>> transform(['c', 'o', ''], '+n') ['c', 'o', 'n'] >>> transform(['c', 'o', 'n'], '+o') ['c', 'o', 'no'] """ c = list(comps) if is_vowel(char): if not c[2]: pos = 1 else: pos = 2 else: if not c[2] and not c[1]: pos = 0 else: pos = 2 c[pos] += char return c
[ "def", "append_comps", "(", "comps", ",", "char", ")", ":", "c", "=", "list", "(", "comps", ")", "if", "is_vowel", "(", "char", ")", ":", "if", "not", "c", "[", "2", "]", ":", "pos", "=", "1", "else", ":", "pos", "=", "2", "else", ":", "if", ...
Append a character to `comps` following this rule: a vowel is added to the vowel part if there is no last consonant, else to the last consonant part; a consonant is added to the first consonant part if there is no vowel, and to the last consonant part if the vowel part is not empty. >>> transform(['', '', '']) ['c', '', ''] >>> transform(['c', '', ''], '+o') ['c', 'o', ''] >>> transform(['c', 'o', ''], '+n') ['c', 'o', 'n'] >>> transform(['c', 'o', 'n'], '+o') ['c', 'o', 'no']
[ "Append", "a", "character", "to", "comps", "following", "this", "rule", ":", "a", "vowel", "is", "added", "to", "the", "vowel", "part", "if", "there", "is", "no", "last", "consonant", "else", "to", "the", "last", "consonant", "part", ";", "a", "consonant...
train
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/utils.py#L51-L75
BoGoEngine/bogo-python
bogo/utils.py
separate
def separate(string):
    """
    Separate a string into smaller parts: first consonant (or head),
    vowel, last consonant (if any).

    >>> separate('tuong')
    ['t','uo','ng']
    >>> separate('ohmyfkinggod')
    ['ohmyfkingg','o','d']
    """
    def atomic_separate(string, last_chars, last_is_vowel):
        # Peel characters off the right end while their vowel-ness matches
        # `last_is_vowel`; returns (remaining_head, peeled_suffix).
        if string == "" or (last_is_vowel != is_vowel(string[-1])):
            return (string, last_chars)
        else:
            return atomic_separate(string[:-1],
                                   string[-1] + last_chars, last_is_vowel)

    # First strip the trailing consonant run, then the vowel run before it.
    head, last_consonant = atomic_separate(string, "", False)
    first_consonant, vowel = atomic_separate(head, "", True)

    # A consonant-only input ends up entirely in `last_consonant`;
    # normalize it into the first-consonant slot.
    if last_consonant and not (vowel + first_consonant):
        comps = [last_consonant, '', '']  # ['', '', b] -> ['b', '', '']
    else:
        comps = [first_consonant, vowel, last_consonant]

    # 'gi' and 'qu' are considered qualified consonants.
    # We want something like this:
    #     ['g', 'ia', ''] -> ['gi', 'a', '']
    #     ['q', 'ua', ''] -> ['qu', 'a', '']
    if (comps[0] != '' and comps[1] != '') and \
            ((comps[0] in 'gG' and comps[1][0] in 'iI' and
              len(comps[1]) > 1) or
             (comps[0] in 'qQ' and comps[1][0] in 'uU')):
        comps[0] += comps[1][:1]
        comps[1] = comps[1][1:]

    return comps
python
def separate(string): """ Separate a string into smaller parts: first consonant (or head), vowel, last consonant (if any). >>> separate('tuong') ['t','uo','ng'] >>> separate('ohmyfkinggod') ['ohmyfkingg','o','d'] """ def atomic_separate(string, last_chars, last_is_vowel): if string == "" or (last_is_vowel != is_vowel(string[-1])): return (string, last_chars) else: return atomic_separate(string[:-1], string[-1] + last_chars, last_is_vowel) head, last_consonant = atomic_separate(string, "", False) first_consonant, vowel = atomic_separate(head, "", True) if last_consonant and not (vowel + first_consonant): comps = [last_consonant, '', ''] # ['', '', b] -> ['b', '', ''] else: comps = [first_consonant, vowel, last_consonant] # 'gi' and 'qu' are considered qualified consonants. # We want something like this: # ['g', 'ia', ''] -> ['gi', 'a', ''] # ['q', 'ua', ''] -> ['qu', 'a', ''] if (comps[0] != '' and comps[1] != '') and \ ((comps[0] in 'gG' and comps[1][0] in 'iI' and len(comps[1]) > 1) or (comps[0] in 'qQ' and comps[1][0] in 'uU')): comps[0] += comps[1][:1] comps[1] = comps[1][1:] return comps
[ "def", "separate", "(", "string", ")", ":", "def", "atomic_separate", "(", "string", ",", "last_chars", ",", "last_is_vowel", ")", ":", "if", "string", "==", "\"\"", "or", "(", "last_is_vowel", "!=", "is_vowel", "(", "string", "[", "-", "1", "]", ")", ...
Separate a string into smaller parts: first consonant (or head), vowel, last consonant (if any). >>> separate('tuong') ['t','uo','ng'] >>> separate('ohmyfkinggod') ['ohmyfkingg','o','d']
[ "Separate", "a", "string", "into", "smaller", "parts", ":", "first", "consonant", "(", "or", "head", ")", "vowel", "last", "consonant", "(", "if", "any", ")", "." ]
train
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/utils.py#L96-L131
biocore/burrito-fillings
bfillings/swarm_v127.py
swarm_denovo_cluster
def swarm_denovo_cluster(seq_path, d=1, threads=1, HALT_EXEC=False):
    """Launch the Swarm de novo OTU picker.

    Parameters
    ----------
    seq_path : filepath to the input reads
    d : clustering resolution (must be a positive integer)
    threads : number of threads to use (must be a positive integer)
    HALT_EXEC : debug flag forwarded to the Swarm application wrapper

    Returns
    -------
    clusters : list of lists of sequence identifiers
    """
    # The input file must exist before anything else is set up.
    if not exists(seq_path):
        raise ValueError("%s does not exist" % seq_path)

    swarm = Swarm(HALT_EXEC=HALT_EXEC)

    # Validate and apply the resolution.
    if d <= 0:
        raise ValueError("Resolution -d must be a positive integer.")
    swarm.Parameters['-d'].on(d)

    # Validate and apply the thread count.
    if threads <= 0:
        raise ValueError("Number of threads must be a positive integer.")
    swarm.Parameters['-t'].on(threads)

    # Temporary file for Swarm's raw OTU map; the final map is returned
    # as a list of lists, so this file is scheduled for removal.
    fd, tmp_swarm_otumap = mkstemp(prefix='temp_otumap_', suffix='.swarm')
    close(fd)
    swarm.Parameters['-o'].on(tmp_swarm_otumap)
    swarm.files_to_remove.append(tmp_swarm_otumap)

    # The read filepath is passed as the final argument of the
    # swarm command line.
    clusters = swarm(seq_path)

    remove_files(swarm.files_to_remove, error_on_missing=False)
    return clusters
python
def swarm_denovo_cluster(seq_path, d=1, threads=1, HALT_EXEC=False): """ Function : launch the Swarm de novo OTU picker Parameters: seq_path, filepath to reads d, resolution threads, number of threads to use Return : clusters, list of lists """ # Check sequence file exists if not exists(seq_path): raise ValueError("%s does not exist" % seq_path) # Instantiate the object swarm = Swarm(HALT_EXEC=HALT_EXEC) # Set the resolution if d > 0: swarm.Parameters['-d'].on(d) else: raise ValueError("Resolution -d must be a positive integer.") # Set the number of threads if threads > 0: swarm.Parameters['-t'].on(threads) else: raise ValueError("Number of threads must be a positive integer.") # create temporary file for Swarm OTU-map f, tmp_swarm_otumap = mkstemp(prefix='temp_otumap_', suffix='.swarm') close(f) swarm.Parameters['-o'].on(tmp_swarm_otumap) # Remove this file later, the final OTU-map # is output by swarm_breaker.py and returned # as a list of lists (clusters) swarm.files_to_remove.append(tmp_swarm_otumap) # Launch Swarm # set the data string to include the read filepath # (to be passed as final arguments in the swarm command) clusters = swarm(seq_path) remove_files(swarm.files_to_remove, error_on_missing=False) # Return clusters return clusters
[ "def", "swarm_denovo_cluster", "(", "seq_path", ",", "d", "=", "1", ",", "threads", "=", "1", ",", "HALT_EXEC", "=", "False", ")", ":", "# Check sequence file exists", "if", "not", "exists", "(", "seq_path", ")", ":", "raise", "ValueError", "(", "\"%s does n...
Function : launch the Swarm de novo OTU picker Parameters: seq_path, filepath to reads d, resolution threads, number of threads to use Return : clusters, list of lists
[ "Function", ":", "launch", "the", "Swarm", "de", "novo", "OTU", "picker" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/swarm_v127.py#L247-L299
biocore/burrito-fillings
bfillings/swarm_v127.py
Swarm._swarm_breaker
def _swarm_breaker(self, seq_path):
    """
    Input : seq_path, a filepath to de-replicated input FASTA reads
    Method: using swarm_breaker.py, break chains of amplicons based
            on abundance information. Abundance is stored after the
            final underscore '_' in each sequence label
            (recommended procedure for Swarm)
    Return: clusters, a list of lists
    """
    # Build the external command; '-s' reuses the OTU map that the
    # swarm run wrote to the '-o' output path.
    swarm_breaker_command = ["swarm_breaker.py",
                             "-f",
                             seq_path,
                             "-s",
                             self.Parameters['-o'].Value,
                             "-d",
                             str(self.Parameters['-d'].Value)]
    try:
        # launch swarm_breaker.py as a subprocess,
        # pipe refined OTU-map to the standard stream
        proc = Popen(swarm_breaker_command,
                     stdout=PIPE,
                     stderr=PIPE,
                     close_fds=True)
        stdout, stderr = proc.communicate()
        # Any stderr output is treated as a fatal error.
        # NOTE(review): StandardError is Python 2-only; this module
        # would need Exception under Python 3 — confirm target version.
        if stderr:
            raise StandardError("Process exited with %s" % stderr)
        # store refined clusters in list of lists
        clusters = []
        for line in stdout.split(linesep):
            # skip line if contains only the newline character
            # NOTE(review): `break` stops at the FIRST empty line rather
            # than skipping it — presumably only trailing lines are empty.
            if not line:
                break
            # Cluster members are separated by tabs or spaces.
            seq_ids = re.split("\t| ", line.strip())
            # remove the abundance information from the labels
            for i in range(len(seq_ids)):
                seq_ids[i] = seq_ids[i].rsplit("_", 1)[0]
            clusters.append(seq_ids)
    except OSError:
        # Popen raises OSError when the executable is not on the PATH.
        raise ApplicationNotFoundError("Cannot find swarm_breaker.py "
                                       "in the $PATH directories.")
    return clusters
python
def _swarm_breaker(self, seq_path): """ Input : seq_path, a filepath to de-replicated input FASTA reads Method: using swarm_breaker.py, break chains of amplicons based on abundance information. Abundance is stored after the final underscore '_' in each sequence label (recommended procedure for Swarm) Return: clusters, a list of lists """ swarm_breaker_command = ["swarm_breaker.py", "-f", seq_path, "-s", self.Parameters['-o'].Value, "-d", str(self.Parameters['-d'].Value)] try: # launch swarm_breaker.py as a subprocess, # pipe refined OTU-map to the standard stream proc = Popen(swarm_breaker_command, stdout=PIPE, stderr=PIPE, close_fds=True) stdout, stderr = proc.communicate() if stderr: raise StandardError("Process exited with %s" % stderr) # store refined clusters in list of lists clusters = [] for line in stdout.split(linesep): # skip line if contains only the newline character if not line: break seq_ids = re.split("\t| ", line.strip()) # remove the abundance information from the labels for i in range(len(seq_ids)): seq_ids[i] = seq_ids[i].rsplit("_", 1)[0] clusters.append(seq_ids) except OSError: raise ApplicationNotFoundError("Cannot find swarm_breaker.py " "in the $PATH directories.") return clusters
[ "def", "_swarm_breaker", "(", "self", ",", "seq_path", ")", ":", "swarm_breaker_command", "=", "[", "\"swarm_breaker.py\"", ",", "\"-f\"", ",", "seq_path", ",", "\"-s\"", ",", "self", ".", "Parameters", "[", "'-o'", "]", ".", "Value", ",", "\"-d\"", ",", "...
Input : seq_path, a filepath to de-replicated input FASTA reads Method: using swarm_breaker.py, break chains of amplicons based on abundance information. Abundance is stored after the final underscore '_' in each sequence label (recommended procedure for Swarm) Return: clusters, a list of lists
[ "Input", ":", "seq_path", "a", "filepath", "to", "de", "-", "replicated", "input", "FASTA", "reads" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/swarm_v127.py#L93-L145
biocore/burrito-fillings
bfillings/swarm_v127.py
Swarm._apply_identical_sequences_prefilter
def _apply_identical_sequences_prefilter(self, seq_path):
    """De-replicate identical input reads into a temporary FASTA file.

    Input : seq_path, a filepath to input FASTA reads
    Return: (exact_match_id_map, unique_seqs_fp) where
            exact_match_id_map maps each retained amplicon ID to the
            list of original FASTA IDs sharing the identical sequence,
            and unique_seqs_fp is the temporary de-replicated FASTA
            filepath (scheduled for later removal).
    """
    # Parent class performs the actual de-replication.
    seqs_to_cluster, exact_match_id_map = \
        self._prefilter_exact_matches(parse_fasta(seq_path))

    # Temporary FASTA file holding only the de-replicated reads.
    fd, unique_seqs_fp = mkstemp(prefix='SwarmExactMatchFilter',
                                 suffix='.fasta')
    close(fd)
    self.files_to_remove.append(unique_seqs_fp)

    # Label each read with its abundance (number of identical input
    # reads), appended after a final underscore as Swarm expects.
    with open(unique_seqs_fp, 'w') as unique_seqs_f:
        for seq_id, seq in seqs_to_cluster:
            unique_seqs_f.write(
                '>%s_%d\n%s\n'
                % (seq_id, len(exact_match_id_map[seq_id]), seq))

    return exact_match_id_map, unique_seqs_fp
python
def _apply_identical_sequences_prefilter(self, seq_path): """ Input : seq_path, a filepath to input FASTA reads Method: prepares and writes de-replicated reads to a temporary FASTA file, calls parent method to do the actual de-replication Return: exact_match_id_map, a dictionary storing de-replicated amplicon ID as key and all original FASTA IDs with identical sequences as values; unique_seqs_fp, filepath to FASTA file holding only de-replicated sequences """ # creating mapping for de-replicated reads seqs_to_cluster, exact_match_id_map =\ self._prefilter_exact_matches(parse_fasta(seq_path)) # create temporary file for storing the de-replicated reads fd, unique_seqs_fp = mkstemp( prefix='SwarmExactMatchFilter', suffix='.fasta') close(fd) self.files_to_remove.append(unique_seqs_fp) # write de-replicated reads to file unique_seqs_f = open(unique_seqs_fp, 'w') for seq_id, seq in seqs_to_cluster: unique_seqs_f.write('>%s_%d\n%s\n' % (seq_id, len(exact_match_id_map[seq_id]), seq)) unique_seqs_f.close() return exact_match_id_map, unique_seqs_fp
[ "def", "_apply_identical_sequences_prefilter", "(", "self", ",", "seq_path", ")", ":", "# creating mapping for de-replicated reads", "seqs_to_cluster", ",", "exact_match_id_map", "=", "self", ".", "_prefilter_exact_matches", "(", "parse_fasta", "(", "seq_path", ")", ")", ...
Input : seq_path, a filepath to input FASTA reads Method: prepares and writes de-replicated reads to a temporary FASTA file, calls parent method to do the actual de-replication Return: exact_match_id_map, a dictionary storing de-replicated amplicon ID as key and all original FASTA IDs with identical sequences as values; unique_seqs_fp, filepath to FASTA file holding only de-replicated sequences
[ "Input", ":", "seq_path", "a", "filepath", "to", "input", "FASTA", "reads", "Method", ":", "prepares", "and", "writes", "de", "-", "replicated", "reads", "to", "a", "temporary", "FASTA", "file", "calls", "parent", "method", "to", "do", "the", "actual", "de...
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/swarm_v127.py#L166-L201
biocore/burrito-fillings
bfillings/swarm_v127.py
Swarm._map_filtered_clusters_to_full_clusters
def _map_filtered_clusters_to_full_clusters(self, clusters, filter_map): """ Input: clusters, a list of cluster lists filter_map, the seq_id in each clusters is the key to the filter_map containing all seq_ids with duplicate FASTA sequences Output: an extended list of cluster lists """ results = [] for cluster in clusters: full_cluster = [] for seq_id in cluster: full_cluster += filter_map[seq_id] results.append(full_cluster) return results
python
def _map_filtered_clusters_to_full_clusters(self, clusters, filter_map): """ Input: clusters, a list of cluster lists filter_map, the seq_id in each clusters is the key to the filter_map containing all seq_ids with duplicate FASTA sequences Output: an extended list of cluster lists """ results = [] for cluster in clusters: full_cluster = [] for seq_id in cluster: full_cluster += filter_map[seq_id] results.append(full_cluster) return results
[ "def", "_map_filtered_clusters_to_full_clusters", "(", "self", ",", "clusters", ",", "filter_map", ")", ":", "results", "=", "[", "]", "for", "cluster", "in", "clusters", ":", "full_cluster", "=", "[", "]", "for", "seq_id", "in", "cluster", ":", "full_cluster"...
Input: clusters, a list of cluster lists filter_map, the seq_id in each clusters is the key to the filter_map containing all seq_ids with duplicate FASTA sequences Output: an extended list of cluster lists
[ "Input", ":", "clusters", "a", "list", "of", "cluster", "lists", "filter_map", "the", "seq_id", "in", "each", "clusters", "is", "the", "key", "to", "the", "filter_map", "containing", "all", "seq_ids", "with", "duplicate", "FASTA", "sequences", "Output", ":", ...
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/swarm_v127.py#L203-L220
biocore/burrito-fillings
bfillings/swarm_v127.py
Swarm._get_result_paths
def _get_result_paths(self, data):
    """Declare the output files produced by a Swarm run."""
    # The OTU map (-o) is the only (mandatory) output.
    otumap_path = self.Parameters['-o'].Value
    return {'OtuMap': ResultPath(Path=otumap_path, IsWritten=True)}
python
def _get_result_paths(self, data): """ Set the result paths """ # Swarm OTU map (mandatory output) return {'OtuMap': ResultPath(Path=self.Parameters['-o'].Value, IsWritten=True)}
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "# Swarm OTU map (mandatory output)", "return", "{", "'OtuMap'", ":", "ResultPath", "(", "Path", "=", "self", ".", "Parameters", "[", "'-o'", "]", ".", "Value", ",", "IsWritten", "=", "True", ")...
Set the result paths
[ "Set", "the", "result", "paths" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/swarm_v127.py#L222-L228
zagaran/mongolia
mongolia/mongo_connection.py
connect_to_database
def connect_to_database(host=None, port=None, connect=False, **kwargs):
    """Open the application's database connection explicitly.

    If this function is never called, a connection is created lazily the
    first time one is needed. Arguments mirror
    pymongo.MongoClient.__init__.

    @param host: the hostname to connect to
    @param port: the port to connect to
    @param connect: if True, immediately begin connecting to MongoDB in
        the background; otherwise connect on the first operation
    """
    connection = CONNECTION.connect(host=host, port=port, connect=connect,
                                    **kwargs)
    return connection
python
def connect_to_database(host=None, port=None, connect=False, **kwargs): """ Explicitly begins a database connection for the application (if this function is not called, a connection is created when it is first needed). Takes arguments identical to pymongo.MongoClient.__init__ @param host: the hostname to connect to @param port: the port to connect to @param connect: if True, immediately begin connecting to MongoDB in the background; otherwise connect on the first operation """ return CONNECTION.connect(host=host, port=port, connect=connect, **kwargs)
[ "def", "connect_to_database", "(", "host", "=", "None", ",", "port", "=", "None", ",", "connect", "=", "False", ",", "*", "*", "kwargs", ")", ":", "return", "CONNECTION", ".", "connect", "(", "host", "=", "host", ",", "port", "=", "port", ",", "conne...
Explicitly begins a database connection for the application (if this function is not called, a connection is created when it is first needed). Takes arguments identical to pymongo.MongoClient.__init__ @param host: the hostname to connect to @param port: the port to connect to @param connect: if True, immediately begin connecting to MongoDB in the background; otherwise connect on the first operation
[ "Explicitly", "begins", "a", "database", "connection", "for", "the", "application", "(", "if", "this", "function", "is", "not", "called", "a", "connection", "is", "created", "when", "it", "is", "first", "needed", ")", ".", "Takes", "arguments", "identical", ...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L95-L107
zagaran/mongolia
mongolia/mongo_connection.py
authenticate_connection
def authenticate_connection(username, password, db=None): """ Authenticates the current database connection with the passed username and password. If the database connection uses all default parameters, this can be called without connect_to_database. Otherwise, it should be preceded by a connect_to_database call. @param username: the username with which you authenticate; must match a user registered in the database @param password: the password of that user @param db: the database the user is authenticated to access. Passing None (the default) means authenticating against the admin database, which gives the connection access to all databases Example; connecting to all databases locally: connect_to_database() authenticate_connection("username", "password") Example; connecting to a particular database of a remote server: connect_to_database(host="example.com", port="12345") authenticate_connection("username", "password", db="somedb") """ return CONNECTION.authenticate(username, password, db=db)
python
def authenticate_connection(username, password, db=None): """ Authenticates the current database connection with the passed username and password. If the database connection uses all default parameters, this can be called without connect_to_database. Otherwise, it should be preceded by a connect_to_database call. @param username: the username with which you authenticate; must match a user registered in the database @param password: the password of that user @param db: the database the user is authenticated to access. Passing None (the default) means authenticating against the admin database, which gives the connection access to all databases Example; connecting to all databases locally: connect_to_database() authenticate_connection("username", "password") Example; connecting to a particular database of a remote server: connect_to_database(host="example.com", port="12345") authenticate_connection("username", "password", db="somedb") """ return CONNECTION.authenticate(username, password, db=db)
[ "def", "authenticate_connection", "(", "username", ",", "password", ",", "db", "=", "None", ")", ":", "return", "CONNECTION", ".", "authenticate", "(", "username", ",", "password", ",", "db", "=", "db", ")" ]
Authenticates the current database connection with the passed username and password. If the database connection uses all default parameters, this can be called without connect_to_database. Otherwise, it should be preceded by a connect_to_database call. @param username: the username with which you authenticate; must match a user registered in the database @param password: the password of that user @param db: the database the user is authenticated to access. Passing None (the default) means authenticating against the admin database, which gives the connection access to all databases Example; connecting to all databases locally: connect_to_database() authenticate_connection("username", "password") Example; connecting to a particular database of a remote server: connect_to_database(host="example.com", port="12345") authenticate_connection("username", "password", db="somedb")
[ "Authenticates", "the", "current", "database", "connection", "with", "the", "passed", "username", "and", "password", ".", "If", "the", "database", "connection", "uses", "all", "default", "parameters", "this", "can", "be", "called", "without", "connect_to_database", ...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L109-L132
zagaran/mongolia
mongolia/mongo_connection.py
add_user
def add_user(name, password=None, read_only=None, db=None, **kwargs): """ Adds a user that can be used for authentication. @param name: the name of the user to create @param passowrd: the password of the user to create. Can not be used with the userSource argument. @param read_only: if True the user will be read only @param db: the database the user is authenticated to access. Passing None (the default) means add the user to the admin database, which gives the user access to all databases @param **kwargs: forwarded to pymongo.database.add_user Example; adding a user with full database access: add_user("username", "password") Example; adding a user with read only privilage on a partiucalr database: add_user("username", "password", read_only=True, db="somedb") NOTE: This function will only work if mongo is being run unauthenticated or you have already authenticated with another user with appropriate privileges to add a user to the specified database. """ return CONNECTION.add_user(name, password=password, read_only=read_only, db=db, **kwargs)
python
def add_user(name, password=None, read_only=None, db=None, **kwargs): """ Adds a user that can be used for authentication. @param name: the name of the user to create @param passowrd: the password of the user to create. Can not be used with the userSource argument. @param read_only: if True the user will be read only @param db: the database the user is authenticated to access. Passing None (the default) means add the user to the admin database, which gives the user access to all databases @param **kwargs: forwarded to pymongo.database.add_user Example; adding a user with full database access: add_user("username", "password") Example; adding a user with read only privilage on a partiucalr database: add_user("username", "password", read_only=True, db="somedb") NOTE: This function will only work if mongo is being run unauthenticated or you have already authenticated with another user with appropriate privileges to add a user to the specified database. """ return CONNECTION.add_user(name, password=password, read_only=read_only, db=db, **kwargs)
[ "def", "add_user", "(", "name", ",", "password", "=", "None", ",", "read_only", "=", "None", ",", "db", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "CONNECTION", ".", "add_user", "(", "name", ",", "password", "=", "password", ",", "read...
Adds a user that can be used for authentication. @param name: the name of the user to create @param passowrd: the password of the user to create. Can not be used with the userSource argument. @param read_only: if True the user will be read only @param db: the database the user is authenticated to access. Passing None (the default) means add the user to the admin database, which gives the user access to all databases @param **kwargs: forwarded to pymongo.database.add_user Example; adding a user with full database access: add_user("username", "password") Example; adding a user with read only privilage on a partiucalr database: add_user("username", "password", read_only=True, db="somedb") NOTE: This function will only work if mongo is being run unauthenticated or you have already authenticated with another user with appropriate privileges to add a user to the specified database.
[ "Adds", "a", "user", "that", "can", "be", "used", "for", "authentication", "." ]
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L180-L203
zagaran/mongolia
mongolia/mongo_connection.py
add_superuser
def add_superuser(name, password, **kwargs): """ Adds a user with userAdminAnyDatabase role to mongo. @param name: the name of the user to create @param passowrd: the password of the user to create. Can not be used with the userSource argument. @param **kwargs: forwarded to pymongo.database.add_user """ return CONNECTION.add_user( name, password=password, roles=["userAdminAnyDatabase", "readWriteAnyDatabase", "root", "backup", "restore"], **kwargs )
python
def add_superuser(name, password, **kwargs): """ Adds a user with userAdminAnyDatabase role to mongo. @param name: the name of the user to create @param passowrd: the password of the user to create. Can not be used with the userSource argument. @param **kwargs: forwarded to pymongo.database.add_user """ return CONNECTION.add_user( name, password=password, roles=["userAdminAnyDatabase", "readWriteAnyDatabase", "root", "backup", "restore"], **kwargs )
[ "def", "add_superuser", "(", "name", ",", "password", ",", "*", "*", "kwargs", ")", ":", "return", "CONNECTION", ".", "add_user", "(", "name", ",", "password", "=", "password", ",", "roles", "=", "[", "\"userAdminAnyDatabase\"", ",", "\"readWriteAnyDatabase\""...
Adds a user with userAdminAnyDatabase role to mongo. @param name: the name of the user to create @param passowrd: the password of the user to create. Can not be used with the userSource argument. @param **kwargs: forwarded to pymongo.database.add_user
[ "Adds", "a", "user", "with", "userAdminAnyDatabase", "role", "to", "mongo", "." ]
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L205-L217
zagaran/mongolia
mongolia/mongo_connection.py
list_database
def list_database(db=None): """ Lists the names of either the databases on the machine or the collections of a particular database @param db: the database for which to list the collection names; if db is None, then it lists all databases instead the contents of the database with the name passed in db """ if db is None: return CONNECTION.get_connection().database_names() return CONNECTION.get_connection()[db].collection_names()
python
def list_database(db=None): """ Lists the names of either the databases on the machine or the collections of a particular database @param db: the database for which to list the collection names; if db is None, then it lists all databases instead the contents of the database with the name passed in db """ if db is None: return CONNECTION.get_connection().database_names() return CONNECTION.get_connection()[db].collection_names()
[ "def", "list_database", "(", "db", "=", "None", ")", ":", "if", "db", "is", "None", ":", "return", "CONNECTION", ".", "get_connection", "(", ")", ".", "database_names", "(", ")", "return", "CONNECTION", ".", "get_connection", "(", ")", "[", "db", "]", ...
Lists the names of either the databases on the machine or the collections of a particular database @param db: the database for which to list the collection names; if db is None, then it lists all databases instead the contents of the database with the name passed in db
[ "Lists", "the", "names", "of", "either", "the", "databases", "on", "the", "machine", "or", "the", "collections", "of", "a", "particular", "database" ]
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L219-L230
zagaran/mongolia
mongolia/mongo_connection.py
MongoConnection.connect
def connect(self, host=None, port=None, connect=False, **kwargs): """ Explicitly creates the MongoClient; this method must be used in order to specify a non-default host or port to the MongoClient. Takes arguments identical to MongoClient.__init__""" try: self.__connection = MongoClient(host=host, port=port, connect=connect, **kwargs) except (AutoReconnect, ConnectionFailure, ServerSelectionTimeoutError): raise DatabaseIsDownError("No mongod process is running.")
python
def connect(self, host=None, port=None, connect=False, **kwargs): """ Explicitly creates the MongoClient; this method must be used in order to specify a non-default host or port to the MongoClient. Takes arguments identical to MongoClient.__init__""" try: self.__connection = MongoClient(host=host, port=port, connect=connect, **kwargs) except (AutoReconnect, ConnectionFailure, ServerSelectionTimeoutError): raise DatabaseIsDownError("No mongod process is running.")
[ "def", "connect", "(", "self", ",", "host", "=", "None", ",", "port", "=", "None", ",", "connect", "=", "False", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "__connection", "=", "MongoClient", "(", "host", "=", "host", ",", "port", ...
Explicitly creates the MongoClient; this method must be used in order to specify a non-default host or port to the MongoClient. Takes arguments identical to MongoClient.__init__
[ "Explicitly", "creates", "the", "MongoClient", ";", "this", "method", "must", "be", "used", "in", "order", "to", "specify", "a", "non", "-", "default", "host", "or", "port", "to", "the", "MongoClient", ".", "Takes", "arguments", "identical", "to", "MongoClie...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L64-L71
zagaran/mongolia
mongolia/mongo_connection.py
MongoConnection.authenticate
def authenticate(self, username, password, db=None): """ Authenticates the MongoClient with the passed username and password """ if db is None: return self.get_connection().admin.authenticate(username, password) return self.get_connection()[db].authenticate(username, password)
python
def authenticate(self, username, password, db=None): """ Authenticates the MongoClient with the passed username and password """ if db is None: return self.get_connection().admin.authenticate(username, password) return self.get_connection()[db].authenticate(username, password)
[ "def", "authenticate", "(", "self", ",", "username", ",", "password", ",", "db", "=", "None", ")", ":", "if", "db", "is", "None", ":", "return", "self", ".", "get_connection", "(", ")", ".", "admin", ".", "authenticate", "(", "username", ",", "password...
Authenticates the MongoClient with the passed username and password
[ "Authenticates", "the", "MongoClient", "with", "the", "passed", "username", "and", "password" ]
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L73-L77
zagaran/mongolia
mongolia/mongo_connection.py
MongoConnection.add_user
def add_user(self, name, password=None, read_only=None, db=None, **kwargs): """ Adds a user that can be used for authentication """ if db is None: return self.get_connection().admin.add_user( name, password=password, read_only=read_only, **kwargs) return self.get_connection()[db].add_user( name, password=password, read_only=read_only, **kwargs)
python
def add_user(self, name, password=None, read_only=None, db=None, **kwargs): """ Adds a user that can be used for authentication """ if db is None: return self.get_connection().admin.add_user( name, password=password, read_only=read_only, **kwargs) return self.get_connection()[db].add_user( name, password=password, read_only=read_only, **kwargs)
[ "def", "add_user", "(", "self", ",", "name", ",", "password", "=", "None", ",", "read_only", "=", "None", ",", "db", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "db", "is", "None", ":", "return", "self", ".", "get_connection", "(", ")", ...
Adds a user that can be used for authentication
[ "Adds", "a", "user", "that", "can", "be", "used", "for", "authentication" ]
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L79-L85
kejbaly2/metrique
metrique/core_api.py
metrique_object
def metrique_object(_oid, _id=None, _hash=None, _start=None, _end=None, _e=None, _v=None, id=None, __v__=None, **kwargs): ''' Function which takes a dictionary (Mapping) object as input and returns return back a metrique object. Special meta property are added to each object:: _oid: ... _start: ... ... FIXME ''' # NOTE: we completely ignore incoming 'id' keys! # id is RESERVED and ALWAYS expected to be 'autoincrement' # upon insertion into DB (though, its optional, depending # on backend storage behaivor). if id: warnings.warn('non-null "id" keys detected, ignoring them!') _e = dict(_e or {}) # expecting a dict with copy() atr _v = int(_v or 0) if not isinstance(_start, float): _start = dt2ts(_start) if _start else utcnow(as_datetime=False) assert _start is not None, "_start (%s) must be set!" % _start if not isinstance(_end, float): _end = dt2ts(_end) if _end else None _err_msg = "_end(%s) must be >= _start(%s) or None!" % (_end, _start) assert _end is None or bool(_end >= _start), _err_msg # these meta fields are used to generate unique object _hash kwargs['_oid'] = _oid kwargs['_v'] = _v kwargs['_id'] = gen_id(_oid, _start, _end) # ignore passed in _id # generate unique, consistent object _hash based on 'frozen' obj contents # FIXME: make _hash == None valid #kwargs['_hash'] = jsonhash(kwargs) if _hash else None kwargs['_hash'] = jsonhash(kwargs) # add some additional non-hashable meta data kwargs['_start'] = _start kwargs['_end'] = _end kwargs['__v__'] = __v__ or __version__ kwargs['_e'] = _e return kwargs
python
def metrique_object(_oid, _id=None, _hash=None, _start=None, _end=None, _e=None, _v=None, id=None, __v__=None, **kwargs): ''' Function which takes a dictionary (Mapping) object as input and returns return back a metrique object. Special meta property are added to each object:: _oid: ... _start: ... ... FIXME ''' # NOTE: we completely ignore incoming 'id' keys! # id is RESERVED and ALWAYS expected to be 'autoincrement' # upon insertion into DB (though, its optional, depending # on backend storage behaivor). if id: warnings.warn('non-null "id" keys detected, ignoring them!') _e = dict(_e or {}) # expecting a dict with copy() atr _v = int(_v or 0) if not isinstance(_start, float): _start = dt2ts(_start) if _start else utcnow(as_datetime=False) assert _start is not None, "_start (%s) must be set!" % _start if not isinstance(_end, float): _end = dt2ts(_end) if _end else None _err_msg = "_end(%s) must be >= _start(%s) or None!" % (_end, _start) assert _end is None or bool(_end >= _start), _err_msg # these meta fields are used to generate unique object _hash kwargs['_oid'] = _oid kwargs['_v'] = _v kwargs['_id'] = gen_id(_oid, _start, _end) # ignore passed in _id # generate unique, consistent object _hash based on 'frozen' obj contents # FIXME: make _hash == None valid #kwargs['_hash'] = jsonhash(kwargs) if _hash else None kwargs['_hash'] = jsonhash(kwargs) # add some additional non-hashable meta data kwargs['_start'] = _start kwargs['_end'] = _end kwargs['__v__'] = __v__ or __version__ kwargs['_e'] = _e return kwargs
[ "def", "metrique_object", "(", "_oid", ",", "_id", "=", "None", ",", "_hash", "=", "None", ",", "_start", "=", "None", ",", "_end", "=", "None", ",", "_e", "=", "None", ",", "_v", "=", "None", ",", "id", "=", "None", ",", "__v__", "=", "None", ...
Function which takes a dictionary (Mapping) object as input and returns return back a metrique object. Special meta property are added to each object:: _oid: ... _start: ... ... FIXME
[ "Function", "which", "takes", "a", "dictionary", "(", "Mapping", ")", "object", "as", "input", "and", "returns", "return", "back", "a", "metrique", "object", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/core_api.py#L85-L131
kejbaly2/metrique
metrique/core_api.py
MetriqueContainer._add_variants
def _add_variants(self, key, value, schema): ''' also possible to define some function that takes current value and creates a new value from it ''' variants = schema.get('variants') obj = {} if variants: for _key, func in variants.iteritems(): _value = func(value, self.store) obj.update({_key: _value}) return obj
python
def _add_variants(self, key, value, schema): ''' also possible to define some function that takes current value and creates a new value from it ''' variants = schema.get('variants') obj = {} if variants: for _key, func in variants.iteritems(): _value = func(value, self.store) obj.update({_key: _value}) return obj
[ "def", "_add_variants", "(", "self", ",", "key", ",", "value", ",", "schema", ")", ":", "variants", "=", "schema", ".", "get", "(", "'variants'", ")", "obj", "=", "{", "}", "if", "variants", ":", "for", "_key", ",", "func", "in", "variants", ".", "...
also possible to define some function that takes current value and creates a new value from it
[ "also", "possible", "to", "define", "some", "function", "that", "takes", "current", "value", "and", "creates", "a", "new", "value", "from", "it" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/core_api.py#L341-L351
kejbaly2/metrique
metrique/core_api.py
MetriqueContainer._type_container
def _type_container(self, value, _type): ' apply type to all values in the list ' if value is None: # normalize null containers to empty list return [] elif not isinstance(value, list): raise ValueError("expected list type, got: %s" % type(value)) else: return sorted(self._type_single(item, _type) for item in value)
python
def _type_container(self, value, _type): ' apply type to all values in the list ' if value is None: # normalize null containers to empty list return [] elif not isinstance(value, list): raise ValueError("expected list type, got: %s" % type(value)) else: return sorted(self._type_single(item, _type) for item in value)
[ "def", "_type_container", "(", "self", ",", "value", ",", "_type", ")", ":", "if", "value", "is", "None", ":", "# normalize null containers to empty list", "return", "[", "]", "elif", "not", "isinstance", "(", "value", ",", "list", ")", ":", "raise", "ValueE...
apply type to all values in the list
[ "apply", "type", "to", "all", "values", "in", "the", "list" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/core_api.py#L490-L498
kejbaly2/metrique
metrique/core_api.py
MetriqueContainer._type_single
def _type_single(self, value, _type): ' apply type to the single value ' if value is None or _type in (None, NoneType): # don't convert null values # default type is the original type if none set pass elif isinstance(value, _type): # or values already of correct type # normalize all dates to epochs value = dt2ts(value) if _type in [datetime, date] else value else: if _type in (datetime, date): # normalize all dates to epochs value = dt2ts(value) elif _type in (unicode, str): # make sure all string types are properly unicoded value = to_encoding(value) else: try: value = _type(value) except Exception: value = to_encoding(value) logger.error("typecast failed: %s(value=%s)" % ( _type.__name__, value)) raise return value
python
def _type_single(self, value, _type): ' apply type to the single value ' if value is None or _type in (None, NoneType): # don't convert null values # default type is the original type if none set pass elif isinstance(value, _type): # or values already of correct type # normalize all dates to epochs value = dt2ts(value) if _type in [datetime, date] else value else: if _type in (datetime, date): # normalize all dates to epochs value = dt2ts(value) elif _type in (unicode, str): # make sure all string types are properly unicoded value = to_encoding(value) else: try: value = _type(value) except Exception: value = to_encoding(value) logger.error("typecast failed: %s(value=%s)" % ( _type.__name__, value)) raise return value
[ "def", "_type_single", "(", "self", ",", "value", ",", "_type", ")", ":", "if", "value", "is", "None", "or", "_type", "in", "(", "None", ",", "NoneType", ")", ":", "# don't convert null values", "# default type is the original type if none set", "pass", "elif", ...
apply type to the single value
[ "apply", "type", "to", "the", "single", "value" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/core_api.py#L500-L524
kejbaly2/metrique
metrique/core_api.py
MetriqueContainer.flush
def flush(self, objects=None, batch_size=None, **kwargs): ''' flush objects stored in self.container or those passed in''' batch_size = batch_size or self.config.get('batch_size') # if we're flushing these from self.store, we'll want to # pop them later. if objects: from_store = False else: from_store = True objects = self.itervalues() # sort by _oid for grouping by _oid below objects = sorted(objects, key=lambda x: x['_oid']) batch, _ids = [], [] # batch in groups with _oid, since upsert's delete # all _oid rows when autosnap=False! for key, group in groupby(objects, lambda x: x['_oid']): _grouped = list(group) if len(batch) + len(_grouped) > batch_size: logger.debug("Upserting %s objects" % len(batch)) _ = self.upsert(objects=batch, **kwargs) logger.debug("... done upserting %s objects" % len(batch)) _ids.extend(_) # start a new batch batch = _grouped else: # extend existing batch, since still will be < batch_size batch.extend(_grouped) else: if batch: # get the last batch too logger.debug("Upserting last batch of %s objects" % len(batch)) _ = self.upsert(objects=batch, **kwargs) _ids.extend(_) logger.debug("... Finished upserting all objects!") if from_store: for _id in _ids: # try to pop the _id's flushed from store; warn / ignore # the KeyError if they're not there try: self.store.pop(_id) except KeyError: logger.warn( "failed to pop {} from self.store!".format(_id)) return sorted(_ids)
python
def flush(self, objects=None, batch_size=None, **kwargs): ''' flush objects stored in self.container or those passed in''' batch_size = batch_size or self.config.get('batch_size') # if we're flushing these from self.store, we'll want to # pop them later. if objects: from_store = False else: from_store = True objects = self.itervalues() # sort by _oid for grouping by _oid below objects = sorted(objects, key=lambda x: x['_oid']) batch, _ids = [], [] # batch in groups with _oid, since upsert's delete # all _oid rows when autosnap=False! for key, group in groupby(objects, lambda x: x['_oid']): _grouped = list(group) if len(batch) + len(_grouped) > batch_size: logger.debug("Upserting %s objects" % len(batch)) _ = self.upsert(objects=batch, **kwargs) logger.debug("... done upserting %s objects" % len(batch)) _ids.extend(_) # start a new batch batch = _grouped else: # extend existing batch, since still will be < batch_size batch.extend(_grouped) else: if batch: # get the last batch too logger.debug("Upserting last batch of %s objects" % len(batch)) _ = self.upsert(objects=batch, **kwargs) _ids.extend(_) logger.debug("... Finished upserting all objects!") if from_store: for _id in _ids: # try to pop the _id's flushed from store; warn / ignore # the KeyError if they're not there try: self.store.pop(_id) except KeyError: logger.warn( "failed to pop {} from self.store!".format(_id)) return sorted(_ids)
[ "def", "flush", "(", "self", ",", "objects", "=", "None", ",", "batch_size", "=", "None", ",", "*", "*", "kwargs", ")", ":", "batch_size", "=", "batch_size", "or", "self", ".", "config", ".", "get", "(", "'batch_size'", ")", "# if we're flushing these from...
flush objects stored in self.container or those passed in
[ "flush", "objects", "stored", "in", "self", ".", "container", "or", "those", "passed", "in" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/core_api.py#L583-L627
kejbaly2/metrique
metrique/core_api.py
MetriqueContainer.count
def count(self, query=None, date=None): ''' Run a query on the given cube and return only the count of resulting matches. :param query: The query in pql :param date: date (metrique date range) that should be queried If date==None then the most recent versions of the objects will be queried. :param collection: cube name :param owner: username of cube owner ''' return self.proxy.count(table=self.name, query=query, date=date)
python
def count(self, query=None, date=None): ''' Run a query on the given cube and return only the count of resulting matches. :param query: The query in pql :param date: date (metrique date range) that should be queried If date==None then the most recent versions of the objects will be queried. :param collection: cube name :param owner: username of cube owner ''' return self.proxy.count(table=self.name, query=query, date=date)
[ "def", "count", "(", "self", ",", "query", "=", "None", ",", "date", "=", "None", ")", ":", "return", "self", ".", "proxy", ".", "count", "(", "table", "=", "self", ".", "name", ",", "query", "=", "query", ",", "date", "=", "date", ")" ]
Run a query on the given cube and return only the count of resulting matches. :param query: The query in pql :param date: date (metrique date range) that should be queried If date==None then the most recent versions of the objects will be queried. :param collection: cube name :param owner: username of cube owner
[ "Run", "a", "query", "on", "the", "given", "cube", "and", "return", "only", "the", "count", "of", "resulting", "matches", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/core_api.py#L708-L720
kejbaly2/metrique
metrique/core_api.py
MetriqueContainer.index
def index(self, fields, name=None, **kwargs): ''' Build a new index on a cube. Examples: + index('field_name') :param fields: A single field or a list of (key, direction) pairs :param name: (optional) Custom name to use for this index :param collection: cube name :param owner: username of cube owner ''' return self.proxy.index(fields=fields, name=name, table=self.name, **kwargs)
python
def index(self, fields, name=None, **kwargs): ''' Build a new index on a cube. Examples: + index('field_name') :param fields: A single field or a list of (key, direction) pairs :param name: (optional) Custom name to use for this index :param collection: cube name :param owner: username of cube owner ''' return self.proxy.index(fields=fields, name=name, table=self.name, **kwargs)
[ "def", "index", "(", "self", ",", "fields", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "proxy", ".", "index", "(", "fields", "=", "fields", ",", "name", "=", "name", ",", "table", "=", "self", ".", "name",...
Build a new index on a cube. Examples: + index('field_name') :param fields: A single field or a list of (key, direction) pairs :param name: (optional) Custom name to use for this index :param collection: cube name :param owner: username of cube owner
[ "Build", "a", "new", "index", "on", "a", "cube", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/core_api.py#L747-L760
kejbaly2/metrique
metrique/core_api.py
MetriqueContainer.share
def share(self, with_user, roles=None): ''' Give cube access rights to another user ''' return self.proxy.share(table=self.name, with_user=with_user, roles=roles)
python
def share(self, with_user, roles=None): ''' Give cube access rights to another user ''' return self.proxy.share(table=self.name, with_user=with_user, roles=roles)
[ "def", "share", "(", "self", ",", "with_user", ",", "roles", "=", "None", ")", ":", "return", "self", ".", "proxy", ".", "share", "(", "table", "=", "self", ".", "name", ",", "with_user", "=", "with_user", ",", "roles", "=", "roles", ")" ]
Give cube access rights to another user
[ "Give", "cube", "access", "rights", "to", "another", "user" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/core_api.py#L774-L779
biocore/burrito-fillings
bfillings/sortmerna_v2.py
build_database_sortmerna
def build_database_sortmerna(fasta_path, max_pos=None, output_dir=None, temp_dir=tempfile.gettempdir(), HALT_EXEC=False): """ Build sortmerna db from fasta_path; return db name and list of files created Parameters ---------- fasta_path : string path to fasta file of sequences to build database. max_pos : integer, optional maximum positions to store per seed in index [default: 10000]. output_dir : string, optional directory where output should be written [default: same directory as fasta_path] HALT_EXEC : boolean, optional halt just before running the indexdb_rna command and print the command -- useful for debugging [default: False]. Return ------ db_name : string filepath to indexed database. db_filepaths : list output files by indexdb_rna """ if fasta_path is None: raise ValueError("Error: path to fasta reference " "sequences must exist.") fasta_dir, fasta_filename = split(fasta_path) if not output_dir: output_dir = fasta_dir or '.' # Will cd to this directory, so just pass the filename # so the app is not confused by relative paths fasta_path = fasta_filename index_basename = splitext(fasta_filename)[0] db_name = join(output_dir, index_basename) # Instantiate the object sdb = IndexDB(WorkingDir=output_dir, HALT_EXEC=HALT_EXEC) # The parameter --ref STRING must follow the format where # STRING = /path/to/ref.fasta,/path/to/ref.idx sdb.Parameters['--ref'].on("%s,%s" % (fasta_path, db_name)) # Set temporary directory sdb.Parameters['--tmpdir'].on(temp_dir) # Override --max_pos parameter if max_pos is not None: sdb.Parameters['--max_pos'].on(max_pos) # Run indexdb_rna app_result = sdb() # Return all output files (by indexdb_rna) as a list, # first however remove the StdErr and StdOut filepaths # as they files will be destroyed at the exit from # this function (IndexDB is a local instance) db_filepaths = [v.name for k, v in app_result.items() if k not in {'StdErr', 'StdOut'} and hasattr(v, 'name')] return db_name, db_filepaths
python
def build_database_sortmerna(fasta_path, max_pos=None, output_dir=None, temp_dir=tempfile.gettempdir(), HALT_EXEC=False): """ Build sortmerna db from fasta_path; return db name and list of files created Parameters ---------- fasta_path : string path to fasta file of sequences to build database. max_pos : integer, optional maximum positions to store per seed in index [default: 10000]. output_dir : string, optional directory where output should be written [default: same directory as fasta_path] HALT_EXEC : boolean, optional halt just before running the indexdb_rna command and print the command -- useful for debugging [default: False]. Return ------ db_name : string filepath to indexed database. db_filepaths : list output files by indexdb_rna """ if fasta_path is None: raise ValueError("Error: path to fasta reference " "sequences must exist.") fasta_dir, fasta_filename = split(fasta_path) if not output_dir: output_dir = fasta_dir or '.' # Will cd to this directory, so just pass the filename # so the app is not confused by relative paths fasta_path = fasta_filename index_basename = splitext(fasta_filename)[0] db_name = join(output_dir, index_basename) # Instantiate the object sdb = IndexDB(WorkingDir=output_dir, HALT_EXEC=HALT_EXEC) # The parameter --ref STRING must follow the format where # STRING = /path/to/ref.fasta,/path/to/ref.idx sdb.Parameters['--ref'].on("%s,%s" % (fasta_path, db_name)) # Set temporary directory sdb.Parameters['--tmpdir'].on(temp_dir) # Override --max_pos parameter if max_pos is not None: sdb.Parameters['--max_pos'].on(max_pos) # Run indexdb_rna app_result = sdb() # Return all output files (by indexdb_rna) as a list, # first however remove the StdErr and StdOut filepaths # as they files will be destroyed at the exit from # this function (IndexDB is a local instance) db_filepaths = [v.name for k, v in app_result.items() if k not in {'StdErr', 'StdOut'} and hasattr(v, 'name')] return db_name, db_filepaths
[ "def", "build_database_sortmerna", "(", "fasta_path", ",", "max_pos", "=", "None", ",", "output_dir", "=", "None", ",", "temp_dir", "=", "tempfile", ".", "gettempdir", "(", ")", ",", "HALT_EXEC", "=", "False", ")", ":", "if", "fasta_path", "is", "None", ":...
Build sortmerna db from fasta_path; return db name and list of files created Parameters ---------- fasta_path : string path to fasta file of sequences to build database. max_pos : integer, optional maximum positions to store per seed in index [default: 10000]. output_dir : string, optional directory where output should be written [default: same directory as fasta_path] HALT_EXEC : boolean, optional halt just before running the indexdb_rna command and print the command -- useful for debugging [default: False]. Return ------ db_name : string filepath to indexed database. db_filepaths : list output files by indexdb_rna
[ "Build", "sortmerna", "db", "from", "fasta_path", ";", "return", "db", "name", "and", "list", "of", "files", "created" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/sortmerna_v2.py#L75-L145
biocore/burrito-fillings
bfillings/sortmerna_v2.py
sortmerna_ref_cluster
def sortmerna_ref_cluster(seq_path=None, sortmerna_db=None, refseqs_fp=None, result_path=None, tabular=False, max_e_value=1, similarity=0.97, coverage=0.97, threads=1, best=1, HALT_EXEC=False ): """Launch sortmerna OTU picker Parameters ---------- seq_path : str filepath to query sequences. sortmerna_db : str indexed reference database. refseqs_fp : str filepath of reference sequences. result_path : str filepath to output OTU map. max_e_value : float, optional E-value threshold [default: 1]. similarity : float, optional similarity %id threshold [default: 0.97]. coverage : float, optional query coverage % threshold [default: 0.97]. threads : int, optional number of threads to use (OpenMP) [default: 1]. tabular : bool, optional output BLAST tabular alignments [default: False]. best : int, optional number of best alignments to output per read [default: 1]. Returns ------- clusters : dict of lists OTU ids and reads mapping to them failures : list reads which did not align """ # Instantiate the object smr = Sortmerna(HALT_EXEC=HALT_EXEC) # Set input query sequences path if seq_path is not None: smr.Parameters['--reads'].on(seq_path) else: raise ValueError("Error: a read file is mandatory input.") # Set the input reference sequence + indexed database path if sortmerna_db is not None: smr.Parameters['--ref'].on("%s,%s" % (refseqs_fp, sortmerna_db)) else: raise ValueError("Error: an indexed database for reference set %s must" " already exist.\nUse indexdb_rna to index the" " database." 
% refseqs_fp) if result_path is None: raise ValueError("Error: the result path must be set.") # Set output results path (for Blast alignments, clusters and failures) output_dir = dirname(result_path) if output_dir is not None: output_file = join(output_dir, "sortmerna_otus") smr.Parameters['--aligned'].on(output_file) # Set E-value threshold if max_e_value is not None: smr.Parameters['-e'].on(max_e_value) # Set similarity threshold if similarity is not None: smr.Parameters['--id'].on(similarity) # Set query coverage threshold if coverage is not None: smr.Parameters['--coverage'].on(coverage) # Set number of best alignments to output if best is not None: smr.Parameters['--best'].on(best) # Set Blast tabular output # The option --blast 3 represents an # m8 blast tabular output + two extra # columns containing the CIGAR string # and the query coverage if tabular: smr.Parameters['--blast'].on("3") # Set number of threads if threads is not None: smr.Parameters['-a'].on(threads) # Run sortmerna app_result = smr() # Put clusters into a map of lists f_otumap = app_result['OtuMap'] rows = (line.strip().split('\t') for line in f_otumap) clusters = {r[0]: r[1:] for r in rows} # Put failures into a list f_failure = app_result['FastaForDenovo'] failures = [re.split('>| ', label)[0] for label, seq in parse_fasta(f_failure)] # remove the aligned FASTA file and failures FASTA file # (currently these are re-constructed using pick_rep_set.py # further in the OTU-picking pipeline) smr_files_to_remove = [app_result['FastaForDenovo'].name, app_result['FastaMatches'].name, app_result['OtuMap'].name] return clusters, failures, smr_files_to_remove
python
def sortmerna_ref_cluster(seq_path=None, sortmerna_db=None, refseqs_fp=None, result_path=None, tabular=False, max_e_value=1, similarity=0.97, coverage=0.97, threads=1, best=1, HALT_EXEC=False ): """Launch sortmerna OTU picker Parameters ---------- seq_path : str filepath to query sequences. sortmerna_db : str indexed reference database. refseqs_fp : str filepath of reference sequences. result_path : str filepath to output OTU map. max_e_value : float, optional E-value threshold [default: 1]. similarity : float, optional similarity %id threshold [default: 0.97]. coverage : float, optional query coverage % threshold [default: 0.97]. threads : int, optional number of threads to use (OpenMP) [default: 1]. tabular : bool, optional output BLAST tabular alignments [default: False]. best : int, optional number of best alignments to output per read [default: 1]. Returns ------- clusters : dict of lists OTU ids and reads mapping to them failures : list reads which did not align """ # Instantiate the object smr = Sortmerna(HALT_EXEC=HALT_EXEC) # Set input query sequences path if seq_path is not None: smr.Parameters['--reads'].on(seq_path) else: raise ValueError("Error: a read file is mandatory input.") # Set the input reference sequence + indexed database path if sortmerna_db is not None: smr.Parameters['--ref'].on("%s,%s" % (refseqs_fp, sortmerna_db)) else: raise ValueError("Error: an indexed database for reference set %s must" " already exist.\nUse indexdb_rna to index the" " database." 
% refseqs_fp) if result_path is None: raise ValueError("Error: the result path must be set.") # Set output results path (for Blast alignments, clusters and failures) output_dir = dirname(result_path) if output_dir is not None: output_file = join(output_dir, "sortmerna_otus") smr.Parameters['--aligned'].on(output_file) # Set E-value threshold if max_e_value is not None: smr.Parameters['-e'].on(max_e_value) # Set similarity threshold if similarity is not None: smr.Parameters['--id'].on(similarity) # Set query coverage threshold if coverage is not None: smr.Parameters['--coverage'].on(coverage) # Set number of best alignments to output if best is not None: smr.Parameters['--best'].on(best) # Set Blast tabular output # The option --blast 3 represents an # m8 blast tabular output + two extra # columns containing the CIGAR string # and the query coverage if tabular: smr.Parameters['--blast'].on("3") # Set number of threads if threads is not None: smr.Parameters['-a'].on(threads) # Run sortmerna app_result = smr() # Put clusters into a map of lists f_otumap = app_result['OtuMap'] rows = (line.strip().split('\t') for line in f_otumap) clusters = {r[0]: r[1:] for r in rows} # Put failures into a list f_failure = app_result['FastaForDenovo'] failures = [re.split('>| ', label)[0] for label, seq in parse_fasta(f_failure)] # remove the aligned FASTA file and failures FASTA file # (currently these are re-constructed using pick_rep_set.py # further in the OTU-picking pipeline) smr_files_to_remove = [app_result['FastaForDenovo'].name, app_result['FastaMatches'].name, app_result['OtuMap'].name] return clusters, failures, smr_files_to_remove
[ "def", "sortmerna_ref_cluster", "(", "seq_path", "=", "None", ",", "sortmerna_db", "=", "None", ",", "refseqs_fp", "=", "None", ",", "result_path", "=", "None", ",", "tabular", "=", "False", ",", "max_e_value", "=", "1", ",", "similarity", "=", "0.97", ","...
Launch sortmerna OTU picker Parameters ---------- seq_path : str filepath to query sequences. sortmerna_db : str indexed reference database. refseqs_fp : str filepath of reference sequences. result_path : str filepath to output OTU map. max_e_value : float, optional E-value threshold [default: 1]. similarity : float, optional similarity %id threshold [default: 0.97]. coverage : float, optional query coverage % threshold [default: 0.97]. threads : int, optional number of threads to use (OpenMP) [default: 1]. tabular : bool, optional output BLAST tabular alignments [default: False]. best : int, optional number of best alignments to output per read [default: 1]. Returns ------- clusters : dict of lists OTU ids and reads mapping to them failures : list reads which did not align
[ "Launch", "sortmerna", "OTU", "picker" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/sortmerna_v2.py#L297-L418
biocore/burrito-fillings
bfillings/sortmerna_v2.py
sortmerna_map
def sortmerna_map(seq_path, output_dir, refseqs_fp, sortmerna_db, e_value=1, threads=1, best=None, num_alignments=None, HALT_EXEC=False, output_sam=False, sam_SQ_tags=False, blast_format=3, print_all_reads=True, ): """Launch sortmerna mapper Parameters ---------- seq_path : str filepath to reads. output_dir : str dirpath to sortmerna output. refseqs_fp : str filepath of reference sequences. sortmerna_db : str indexed reference database. e_value : float, optional E-value threshold [default: 1]. threads : int, optional number of threads to use (OpenMP) [default: 1]. best : int, optional number of best alignments to output per read [default: None]. num_alignments : int, optional number of first alignments passing E-value threshold to output per read [default: None]. HALT_EXEC : bool, debugging parameter If passed, will exit just before the sortmerna command is issued and will print out the command that would have been called to stdout [default: False]. output_sam : bool, optional flag to set SAM output format [default: False]. sam_SQ_tags : bool, optional add SQ field to SAM output (if output_SAM is True) [default: False]. blast_format : int, optional Output Blast m8 tabular + 2 extra columns for CIGAR string and query coverge [default: 3]. print_all_reads : bool, optional output NULL alignments for non-aligned reads [default: True]. 
Returns ------- dict of result paths set in _get_result_paths() """ if not (blast_format or output_sam): raise ValueError("Either Blast or SAM output alignment " "format must be chosen.") if (best and num_alignments): raise ValueError("Only one of --best or --num_alignments " "options must be chosen.") # Instantiate the object smr = Sortmerna(HALT_EXEC=HALT_EXEC) # Set the input reference sequence + indexed database path smr.Parameters['--ref'].on("%s,%s" % (refseqs_fp, sortmerna_db)) # Set input query sequences path smr.Parameters['--reads'].on(seq_path) # Set Blast tabular output # The option --blast 3 represents an # m8 blast tabular output + two extra # columns containing the CIGAR string # and the query coverage if blast_format: smr.Parameters['--blast'].on(blast_format) # Output alignments in SAM format if output_sam: smr.Parameters['--sam'].on() if sam_SQ_tags: smr.Parameters['--SQ'].on() # Turn on NULL string alignment output if print_all_reads: smr.Parameters['--print_all_reads'].on() # Set output results path (for Blast alignments and log file) output_file = join(output_dir, "sortmerna_map") smr.Parameters['--aligned'].on(output_file) # Set E-value threshold if e_value is not None: smr.Parameters['-e'].on(e_value) # Set number of best alignments to output per read if best is not None: smr.Parameters['--best'].on(best) # Set number of first alignments passing E-value threshold # to output per read if num_alignments is not None: smr.Parameters['--num_alignments'].on(num_alignments) # Set number of threads if threads is not None: smr.Parameters['-a'].on(threads) # Turn off parameters related to OTU-picking smr.Parameters['--fastx'].off() smr.Parameters['--otu_map'].off() smr.Parameters['--de_novo_otu'].off() smr.Parameters['--id'].off() smr.Parameters['--coverage'].off() # Run sortmerna app_result = smr() return app_result
python
def sortmerna_map(seq_path, output_dir, refseqs_fp, sortmerna_db, e_value=1, threads=1, best=None, num_alignments=None, HALT_EXEC=False, output_sam=False, sam_SQ_tags=False, blast_format=3, print_all_reads=True, ): """Launch sortmerna mapper Parameters ---------- seq_path : str filepath to reads. output_dir : str dirpath to sortmerna output. refseqs_fp : str filepath of reference sequences. sortmerna_db : str indexed reference database. e_value : float, optional E-value threshold [default: 1]. threads : int, optional number of threads to use (OpenMP) [default: 1]. best : int, optional number of best alignments to output per read [default: None]. num_alignments : int, optional number of first alignments passing E-value threshold to output per read [default: None]. HALT_EXEC : bool, debugging parameter If passed, will exit just before the sortmerna command is issued and will print out the command that would have been called to stdout [default: False]. output_sam : bool, optional flag to set SAM output format [default: False]. sam_SQ_tags : bool, optional add SQ field to SAM output (if output_SAM is True) [default: False]. blast_format : int, optional Output Blast m8 tabular + 2 extra columns for CIGAR string and query coverge [default: 3]. print_all_reads : bool, optional output NULL alignments for non-aligned reads [default: True]. 
Returns ------- dict of result paths set in _get_result_paths() """ if not (blast_format or output_sam): raise ValueError("Either Blast or SAM output alignment " "format must be chosen.") if (best and num_alignments): raise ValueError("Only one of --best or --num_alignments " "options must be chosen.") # Instantiate the object smr = Sortmerna(HALT_EXEC=HALT_EXEC) # Set the input reference sequence + indexed database path smr.Parameters['--ref'].on("%s,%s" % (refseqs_fp, sortmerna_db)) # Set input query sequences path smr.Parameters['--reads'].on(seq_path) # Set Blast tabular output # The option --blast 3 represents an # m8 blast tabular output + two extra # columns containing the CIGAR string # and the query coverage if blast_format: smr.Parameters['--blast'].on(blast_format) # Output alignments in SAM format if output_sam: smr.Parameters['--sam'].on() if sam_SQ_tags: smr.Parameters['--SQ'].on() # Turn on NULL string alignment output if print_all_reads: smr.Parameters['--print_all_reads'].on() # Set output results path (for Blast alignments and log file) output_file = join(output_dir, "sortmerna_map") smr.Parameters['--aligned'].on(output_file) # Set E-value threshold if e_value is not None: smr.Parameters['-e'].on(e_value) # Set number of best alignments to output per read if best is not None: smr.Parameters['--best'].on(best) # Set number of first alignments passing E-value threshold # to output per read if num_alignments is not None: smr.Parameters['--num_alignments'].on(num_alignments) # Set number of threads if threads is not None: smr.Parameters['-a'].on(threads) # Turn off parameters related to OTU-picking smr.Parameters['--fastx'].off() smr.Parameters['--otu_map'].off() smr.Parameters['--de_novo_otu'].off() smr.Parameters['--id'].off() smr.Parameters['--coverage'].off() # Run sortmerna app_result = smr() return app_result
[ "def", "sortmerna_map", "(", "seq_path", ",", "output_dir", ",", "refseqs_fp", ",", "sortmerna_db", ",", "e_value", "=", "1", ",", "threads", "=", "1", ",", "best", "=", "None", ",", "num_alignments", "=", "None", ",", "HALT_EXEC", "=", "False", ",", "ou...
Launch sortmerna mapper Parameters ---------- seq_path : str filepath to reads. output_dir : str dirpath to sortmerna output. refseqs_fp : str filepath of reference sequences. sortmerna_db : str indexed reference database. e_value : float, optional E-value threshold [default: 1]. threads : int, optional number of threads to use (OpenMP) [default: 1]. best : int, optional number of best alignments to output per read [default: None]. num_alignments : int, optional number of first alignments passing E-value threshold to output per read [default: None]. HALT_EXEC : bool, debugging parameter If passed, will exit just before the sortmerna command is issued and will print out the command that would have been called to stdout [default: False]. output_sam : bool, optional flag to set SAM output format [default: False]. sam_SQ_tags : bool, optional add SQ field to SAM output (if output_SAM is True) [default: False]. blast_format : int, optional Output Blast m8 tabular + 2 extra columns for CIGAR string and query coverge [default: 3]. print_all_reads : bool, optional output NULL alignments for non-aligned reads [default: True]. Returns ------- dict of result paths set in _get_result_paths()
[ "Launch", "sortmerna", "mapper" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/sortmerna_v2.py#L421-L544
biocore/burrito-fillings
bfillings/sortmerna_v2.py
IndexDB._get_result_paths
def _get_result_paths(self, data): """ Build the dict of result filepaths """ # get the filepath of the indexed database (after comma) # /path/to/refseqs.fasta,/path/to/refseqs.idx # ^------------------^ db_name = (self.Parameters['--ref'].Value).split(',')[1] result = {} extensions = ['bursttrie', 'kmer', 'pos', 'stats'] for extension in extensions: for file_path in glob("%s.%s*" % (db_name, extension)): # this will match e.g. nr.bursttrie_0.dat, nr.bursttrie_1.dat # and nr.stats key = file_path.split(db_name + '.')[1] result[key] = ResultPath(Path=file_path, IsWritten=True) return result
python
def _get_result_paths(self, data): """ Build the dict of result filepaths """ # get the filepath of the indexed database (after comma) # /path/to/refseqs.fasta,/path/to/refseqs.idx # ^------------------^ db_name = (self.Parameters['--ref'].Value).split(',')[1] result = {} extensions = ['bursttrie', 'kmer', 'pos', 'stats'] for extension in extensions: for file_path in glob("%s.%s*" % (db_name, extension)): # this will match e.g. nr.bursttrie_0.dat, nr.bursttrie_1.dat # and nr.stats key = file_path.split(db_name + '.')[1] result[key] = ResultPath(Path=file_path, IsWritten=True) return result
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "# get the filepath of the indexed database (after comma)", "# /path/to/refseqs.fasta,/path/to/refseqs.idx", "# ^------------------^", "db_name", "=", "(", "self", ".", "Parameters", "[", "'--r...
Build the dict of result filepaths
[ "Build", "the", "dict", "of", "result", "filepaths" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/sortmerna_v2.py#L56-L72
biocore/burrito-fillings
bfillings/sortmerna_v2.py
Sortmerna._get_result_paths
def _get_result_paths(self, data): """ Set the result paths """ result = {} # get the file extension of the reads file (sortmerna # internally outputs all results with this extension) fileExtension = splitext(self.Parameters['--reads'].Value)[1] # at this point the parameter --aligned should be set as # sortmerna will not run without it if self.Parameters['--aligned'].isOff(): raise ValueError("Error: the --aligned parameter must be set.") # file base name for aligned reads output_base = self.Parameters['--aligned'].Value # Blast alignments result['BlastAlignments'] =\ ResultPath(Path=output_base + '.blast', IsWritten=self.Parameters['--blast'].isOn()) # SAM alignments result['SAMAlignments'] =\ ResultPath(Path=output_base + '.sam', IsWritten=self.Parameters['--sam'].isOn()) # OTU map (mandatory output) result['OtuMap'] =\ ResultPath(Path=output_base + '_otus.txt', IsWritten=self.Parameters['--otu_map'].isOn()) # FASTA file of sequences in the OTU map (madatory output) result['FastaMatches'] =\ ResultPath(Path=output_base + fileExtension, IsWritten=self.Parameters['--fastx'].isOn()) # FASTA file of sequences not in the OTU map (mandatory output) result['FastaForDenovo'] =\ ResultPath(Path=output_base + '_denovo' + fileExtension, IsWritten=self.Parameters['--de_novo_otu'].isOn()) # Log file result['LogFile'] =\ ResultPath(Path=output_base + '.log', IsWritten=self.Parameters['--log'].isOn()) return result
python
def _get_result_paths(self, data): """ Set the result paths """ result = {} # get the file extension of the reads file (sortmerna # internally outputs all results with this extension) fileExtension = splitext(self.Parameters['--reads'].Value)[1] # at this point the parameter --aligned should be set as # sortmerna will not run without it if self.Parameters['--aligned'].isOff(): raise ValueError("Error: the --aligned parameter must be set.") # file base name for aligned reads output_base = self.Parameters['--aligned'].Value # Blast alignments result['BlastAlignments'] =\ ResultPath(Path=output_base + '.blast', IsWritten=self.Parameters['--blast'].isOn()) # SAM alignments result['SAMAlignments'] =\ ResultPath(Path=output_base + '.sam', IsWritten=self.Parameters['--sam'].isOn()) # OTU map (mandatory output) result['OtuMap'] =\ ResultPath(Path=output_base + '_otus.txt', IsWritten=self.Parameters['--otu_map'].isOn()) # FASTA file of sequences in the OTU map (madatory output) result['FastaMatches'] =\ ResultPath(Path=output_base + fileExtension, IsWritten=self.Parameters['--fastx'].isOn()) # FASTA file of sequences not in the OTU map (mandatory output) result['FastaForDenovo'] =\ ResultPath(Path=output_base + '_denovo' + fileExtension, IsWritten=self.Parameters['--de_novo_otu'].isOn()) # Log file result['LogFile'] =\ ResultPath(Path=output_base + '.log', IsWritten=self.Parameters['--log'].isOn()) return result
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "result", "=", "{", "}", "# get the file extension of the reads file (sortmerna", "# internally outputs all results with this extension)", "fileExtension", "=", "splitext", "(", "self", ".", "Parameters", "[", ...
Set the result paths
[ "Set", "the", "result", "paths" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/sortmerna_v2.py#L233-L280
kejbaly2/metrique
metrique/plotting.py
Plotter.get_color
def get_color(self, color): ''' Returns a color to use. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. ''' if color is None: color = self.counter if isinstance(color, str): color = CNAMES[color] self.counter = color + 1 color %= len(COLORS) return color
python
def get_color(self, color): ''' Returns a color to use. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. ''' if color is None: color = self.counter if isinstance(color, str): color = CNAMES[color] self.counter = color + 1 color %= len(COLORS) return color
[ "def", "get_color", "(", "self", ",", "color", ")", ":", "if", "color", "is", "None", ":", "color", "=", "self", ".", "counter", "if", "isinstance", "(", "color", ",", "str", ")", ":", "color", "=", "CNAMES", "[", "color", "]", "self", ".", "counte...
Returns a color to use. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES.
[ "Returns", "a", "color", "to", "use", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L81-L95
kejbaly2/metrique
metrique/plotting.py
Plotter.plot
def plot(self, series, label='', color=None, style=None): ''' Wrapper around plot. :param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot. ''' color = self.get_color(color) if self.stacked: series += self.running_sum plt.fill_between(series.index, self.running_sum, series, facecolor=ALPHAS[color]) self.running_sum = series plt.gca().set_ylim(bottom=0, top=int(series.max() * 1.05)) series.plot(label=label, c=COLORS[color], linewidth=2, style=style)
python
def plot(self, series, label='', color=None, style=None): ''' Wrapper around plot. :param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot. ''' color = self.get_color(color) if self.stacked: series += self.running_sum plt.fill_between(series.index, self.running_sum, series, facecolor=ALPHAS[color]) self.running_sum = series plt.gca().set_ylim(bottom=0, top=int(series.max() * 1.05)) series.plot(label=label, c=COLORS[color], linewidth=2, style=style)
[ "def", "plot", "(", "self", ",", "series", ",", "label", "=", "''", ",", "color", "=", "None", ",", "style", "=", "None", ")", ":", "color", "=", "self", ".", "get_color", "(", "color", ")", "if", "self", ".", "stacked", ":", "series", "+=", "sel...
Wrapper around plot. :param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot.
[ "Wrapper", "around", "plot", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L97-L119
kejbaly2/metrique
metrique/plotting.py
Plotter.plots
def plots(self, series_list, label_list, colors=None): ''' Plots all the series from the list. The assumption is that all of the series share the same index. :param list series_list: A list of series which should be plotted :param list label_list: A list of labels corresponding to the series :params list list_of_colors: A list of colors to use. ''' colors = colors or range(len(series_list)) for series, label, color in zip(series_list, label_list, colors): self.plot(series=series, label=label, color=color)
python
def plots(self, series_list, label_list, colors=None): ''' Plots all the series from the list. The assumption is that all of the series share the same index. :param list series_list: A list of series which should be plotted :param list label_list: A list of labels corresponding to the series :params list list_of_colors: A list of colors to use. ''' colors = colors or range(len(series_list)) for series, label, color in zip(series_list, label_list, colors): self.plot(series=series, label=label, color=color)
[ "def", "plots", "(", "self", ",", "series_list", ",", "label_list", ",", "colors", "=", "None", ")", ":", "colors", "=", "colors", "or", "range", "(", "len", "(", "series_list", ")", ")", "for", "series", ",", "label", ",", "color", "in", "zip", "(",...
Plots all the series from the list. The assumption is that all of the series share the same index. :param list series_list: A list of series which should be plotted :param list label_list: A list of labels corresponding to the series :params list list_of_colors: A list of colors to use.
[ "Plots", "all", "the", "series", "from", "the", "list", ".", "The", "assumption", "is", "that", "all", "of", "the", "series", "share", "the", "same", "index", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L121-L135
kejbaly2/metrique
metrique/plotting.py
Plotter.line
def line(self, x, label=None, y='bottom', color='grey', ax=None, **kwargs): ''' Creates a vertical line in the plot. :param x: The x coordinate of the line. Should be in the same units as the x-axis. :param string label: The label to be displayed. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-label. :param color color: The color of the line. ''' if ax is None: ax = plt y0, y1 = ax.ylim() else: y0, y1 = ax.get_ylim() ax.axvline(x, color=color, **kwargs) if label is not None: verticalalignment = 'bottom' if y == 'bottom': y = y0 + (y1 - y0) / 25. if y == 'top': verticalalignment = 'top' y = y0 + (y1 - y0) * 24 / 25. ax.annotate('\n' + label, (x, y), rotation=90, verticalalignment=verticalalignment)
python
def line(self, x, label=None, y='bottom', color='grey', ax=None, **kwargs): ''' Creates a vertical line in the plot. :param x: The x coordinate of the line. Should be in the same units as the x-axis. :param string label: The label to be displayed. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-label. :param color color: The color of the line. ''' if ax is None: ax = plt y0, y1 = ax.ylim() else: y0, y1 = ax.get_ylim() ax.axvline(x, color=color, **kwargs) if label is not None: verticalalignment = 'bottom' if y == 'bottom': y = y0 + (y1 - y0) / 25. if y == 'top': verticalalignment = 'top' y = y0 + (y1 - y0) * 24 / 25. ax.annotate('\n' + label, (x, y), rotation=90, verticalalignment=verticalalignment)
[ "def", "line", "(", "self", ",", "x", ",", "label", "=", "None", ",", "y", "=", "'bottom'", ",", "color", "=", "'grey'", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", "y0", ",", ...
Creates a vertical line in the plot. :param x: The x coordinate of the line. Should be in the same units as the x-axis. :param string label: The label to be displayed. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-label. :param color color: The color of the line.
[ "Creates", "a", "vertical", "line", "in", "the", "plot", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L137-L166
kejbaly2/metrique
metrique/plotting.py
Plotter.lines
def lines(self, lines_dict, y='bottom', color='grey', **kwargs): ''' Creates vertical lines in the plot. :param lines_dict: A dictionary of label, x-coordinate pairs. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-labels. :param color color: The color of the lines. ''' for l, x in lines_dict.items(): self.line(x, l, y, color, **kwargs)
python
def lines(self, lines_dict, y='bottom', color='grey', **kwargs): ''' Creates vertical lines in the plot. :param lines_dict: A dictionary of label, x-coordinate pairs. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-labels. :param color color: The color of the lines. ''' for l, x in lines_dict.items(): self.line(x, l, y, color, **kwargs)
[ "def", "lines", "(", "self", ",", "lines_dict", ",", "y", "=", "'bottom'", ",", "color", "=", "'grey'", ",", "*", "*", "kwargs", ")", ":", "for", "l", ",", "x", "in", "lines_dict", ".", "items", "(", ")", ":", "self", ".", "line", "(", "x", ","...
Creates vertical lines in the plot. :param lines_dict: A dictionary of label, x-coordinate pairs. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-labels. :param color color: The color of the lines.
[ "Creates", "vertical", "lines", "in", "the", "plot", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L168-L181
kejbaly2/metrique
metrique/plotting.py
DiffPlotter.plot
def plot(self, series, series_diff=None, label='', color=None, style=None): ''' :param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param pandas.Series series_diff: The series representing the diff that will be plotted in the bottom part. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot. ''' color = self.get_color(color) if series_diff is None and self.autodiffs: series_diff = series.diff() if self.stacked: series += self.running_sum self.ax1.fill_between(series.index, self.running_sum, series, facecolor=ALPHAS[color]) self.running_sum = series self.ax1.set_ylim(bottom=0, top=int(series.max() * 1.05)) series.plot(label=label, c=COLORS[color], linewidth=2, style=style, ax=self.ax1) if series_diff is not None: series_diff.plot(label=label, c=COLORS[color], linewidth=2, style=style, ax=self.ax2)
python
def plot(self, series, series_diff=None, label='', color=None, style=None): ''' :param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param pandas.Series series_diff: The series representing the diff that will be plotted in the bottom part. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot. ''' color = self.get_color(color) if series_diff is None and self.autodiffs: series_diff = series.diff() if self.stacked: series += self.running_sum self.ax1.fill_between(series.index, self.running_sum, series, facecolor=ALPHAS[color]) self.running_sum = series self.ax1.set_ylim(bottom=0, top=int(series.max() * 1.05)) series.plot(label=label, c=COLORS[color], linewidth=2, style=style, ax=self.ax1) if series_diff is not None: series_diff.plot(label=label, c=COLORS[color], linewidth=2, style=style, ax=self.ax2)
[ "def", "plot", "(", "self", ",", "series", ",", "series_diff", "=", "None", ",", "label", "=", "''", ",", "color", "=", "None", ",", "style", "=", "None", ")", ":", "color", "=", "self", ".", "get_color", "(", "color", ")", "if", "series_diff", "is...
:param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param pandas.Series series_diff: The series representing the diff that will be plotted in the bottom part. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot.
[ ":", "param", "pandas", ".", "Series", "series", ":", "The", "series", "to", "be", "plotted", "all", "values", "must", "be", "positive", "if", "stacked", "is", "True", ".", ":", "param", "pandas", ".", "Series", "series_diff", ":", "The", "series", "repr...
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L215-L244
kejbaly2/metrique
metrique/plotting.py
DiffPlotter.line
def line(self, x, label=None, y='bottom', color='grey', **kwargs): ''' Creates a vertical line in the plot. :param x: The x coordinate of the line. Should be in the same units as the x-axis. :param string label: The label to be displayed. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-label. :param color color: The color of the line. ''' super(DiffPlotter, self).line(x, label, y, color, self.ax1, **kwargs) super(DiffPlotter, self).line(x, '', 0, color, self.ax2, **kwargs)
python
def line(self, x, label=None, y='bottom', color='grey', **kwargs): ''' Creates a vertical line in the plot. :param x: The x coordinate of the line. Should be in the same units as the x-axis. :param string label: The label to be displayed. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-label. :param color color: The color of the line. ''' super(DiffPlotter, self).line(x, label, y, color, self.ax1, **kwargs) super(DiffPlotter, self).line(x, '', 0, color, self.ax2, **kwargs)
[ "def", "line", "(", "self", ",", "x", ",", "label", "=", "None", ",", "y", "=", "'bottom'", ",", "color", "=", "'grey'", ",", "*", "*", "kwargs", ")", ":", "super", "(", "DiffPlotter", ",", "self", ")", ".", "line", "(", "x", ",", "label", ",",...
Creates a vertical line in the plot. :param x: The x coordinate of the line. Should be in the same units as the x-axis. :param string label: The label to be displayed. :param y: May be 'top', 'bottom' or int. The y coordinate of the text-label. :param color color: The color of the line.
[ "Creates", "a", "vertical", "line", "in", "the", "plot", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L246-L262
bitlabstudio/django-document-library
document_library/models.py
DocumentManager.published
def published(self, request=None): """ Returns the published documents in the current language. :param request: A Request instance. """ language = getattr(request, 'LANGUAGE_CODE', get_language()) if not language: return self.model.objects.none() qs = self.get_queryset() qs = qs.filter( translations__is_published=True, translations__language_code=language, ) # either it has no category or the one it has is published qs = qs.filter( models.Q(category__isnull=True) | models.Q(category__is_published=True)) return qs
python
def published(self, request=None): """ Returns the published documents in the current language. :param request: A Request instance. """ language = getattr(request, 'LANGUAGE_CODE', get_language()) if not language: return self.model.objects.none() qs = self.get_queryset() qs = qs.filter( translations__is_published=True, translations__language_code=language, ) # either it has no category or the one it has is published qs = qs.filter( models.Q(category__isnull=True) | models.Q(category__is_published=True)) return qs
[ "def", "published", "(", "self", ",", "request", "=", "None", ")", ":", "language", "=", "getattr", "(", "request", ",", "'LANGUAGE_CODE'", ",", "get_language", "(", ")", ")", "if", "not", "language", ":", "return", "self", ".", "model", ".", "objects", ...
Returns the published documents in the current language. :param request: A Request instance.
[ "Returns", "the", "published", "documents", "in", "the", "current", "language", "." ]
train
https://github.com/bitlabstudio/django-document-library/blob/508737277455f182e81780cfca8d8eceb989a45b/document_library/models.py#L94-L114
9b/frisbee
frisbee/modules/bing.py
Module._format
def _format(self): """Format search queries to perform in bulk. Build up the URLs to call for the search engine. These will be ran through a bulk processor and returned to a detailer. """ self.log.debug("Formatting URLs to request") items = list() for i in range(0, self.limit, 10): query = '"%s" %s' % (self.domain, self.modifier) url = self.host + "/search?q=" + query + "&first=" + str(i) items.append(url) self.log.debug("URLs were generated") return items
python
def _format(self): """Format search queries to perform in bulk. Build up the URLs to call for the search engine. These will be ran through a bulk processor and returned to a detailer. """ self.log.debug("Formatting URLs to request") items = list() for i in range(0, self.limit, 10): query = '"%s" %s' % (self.domain, self.modifier) url = self.host + "/search?q=" + query + "&first=" + str(i) items.append(url) self.log.debug("URLs were generated") return items
[ "def", "_format", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Formatting URLs to request\"", ")", "items", "=", "list", "(", ")", "for", "i", "in", "range", "(", "0", ",", "self", ".", "limit", ",", "10", ")", ":", "query", "="...
Format search queries to perform in bulk. Build up the URLs to call for the search engine. These will be ran through a bulk processor and returned to a detailer.
[ "Format", "search", "queries", "to", "perform", "in", "bulk", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L30-L43
9b/frisbee
frisbee/modules/bing.py
Module._process
def _process(self, responses): """Process search engine results for detailed analysis. Search engine result pages (SERPs) come back with each request and will need to be extracted in order to crawl the actual hits. """ self.log.debug("Processing search results") items = list() for response in responses: try: soup = BeautifulSoup(response.content, 'html.parser', from_encoding="iso-8859-1") except: continue else: listings = soup.findAll('li', {'class': 'b_algo'}) items.extend([l.find('a')['href'] for l in listings]) self.log.debug("Search result URLs were extracted") return items
python
def _process(self, responses): """Process search engine results for detailed analysis. Search engine result pages (SERPs) come back with each request and will need to be extracted in order to crawl the actual hits. """ self.log.debug("Processing search results") items = list() for response in responses: try: soup = BeautifulSoup(response.content, 'html.parser', from_encoding="iso-8859-1") except: continue else: listings = soup.findAll('li', {'class': 'b_algo'}) items.extend([l.find('a')['href'] for l in listings]) self.log.debug("Search result URLs were extracted") return items
[ "def", "_process", "(", "self", ",", "responses", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Processing search results\"", ")", "items", "=", "list", "(", ")", "for", "response", "in", "responses", ":", "try", ":", "soup", "=", "BeautifulSoup", ...
Process search engine results for detailed analysis. Search engine result pages (SERPs) come back with each request and will need to be extracted in order to crawl the actual hits.
[ "Process", "search", "engine", "results", "for", "detailed", "analysis", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L45-L63
9b/frisbee
frisbee/modules/bing.py
Module._fetch
def _fetch(self, urls): """Perform bulk collection of data and return the content. Gathering responses is handled by the base class and uses futures to speed up the processing. Response data is saved inside a local variable to be used later in extraction. """ responses = self._request_bulk(urls) for response in responses: try: soup = BeautifulSoup(response.content, 'html.parser', from_encoding="iso-8859-1") text = soup.get_text() except Exception: text = response.text self.data.append(text) # Opportunistic findings return responses
python
def _fetch(self, urls): """Perform bulk collection of data and return the content. Gathering responses is handled by the base class and uses futures to speed up the processing. Response data is saved inside a local variable to be used later in extraction. """ responses = self._request_bulk(urls) for response in responses: try: soup = BeautifulSoup(response.content, 'html.parser', from_encoding="iso-8859-1") text = soup.get_text() except Exception: text = response.text self.data.append(text) # Opportunistic findings return responses
[ "def", "_fetch", "(", "self", ",", "urls", ")", ":", "responses", "=", "self", ".", "_request_bulk", "(", "urls", ")", "for", "response", "in", "responses", ":", "try", ":", "soup", "=", "BeautifulSoup", "(", "response", ".", "content", ",", "'html.parse...
Perform bulk collection of data and return the content. Gathering responses is handled by the base class and uses futures to speed up the processing. Response data is saved inside a local variable to be used later in extraction.
[ "Perform", "bulk", "collection", "of", "data", "and", "return", "the", "content", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L65-L81
9b/frisbee
frisbee/modules/bing.py
Module._extract
def _extract(self): """Extract email addresses from results. Text content from all crawled pages are ran through a simple email extractor. Data is cleaned prior to running pattern expressions. """ self.log.debug("Extracting emails from text content") for item in self.data: emails = extract_emails(item, self.domain, self.fuzzy) self.results.extend(emails) self.log.debug("Email extraction completed") return list(set(self.results))
python
def _extract(self): """Extract email addresses from results. Text content from all crawled pages are ran through a simple email extractor. Data is cleaned prior to running pattern expressions. """ self.log.debug("Extracting emails from text content") for item in self.data: emails = extract_emails(item, self.domain, self.fuzzy) self.results.extend(emails) self.log.debug("Email extraction completed") return list(set(self.results))
[ "def", "_extract", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Extracting emails from text content\"", ")", "for", "item", "in", "self", ".", "data", ":", "emails", "=", "extract_emails", "(", "item", ",", "self", ".", "domain", ",", ...
Extract email addresses from results. Text content from all crawled pages are ran through a simple email extractor. Data is cleaned prior to running pattern expressions.
[ "Extract", "email", "addresses", "from", "results", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L83-L94
9b/frisbee
frisbee/modules/bing.py
Module.search
def search(self): """Run the full search process. Simple public method to abstract the steps needed to produce a full search using the engine. """ requests = self._format() serps = self._fetch(requests) urls = self._process(serps) details = self._fetch(urls) emails = self._extract() return {'emails': emails, 'processed': len(self.data)}
python
def search(self): """Run the full search process. Simple public method to abstract the steps needed to produce a full search using the engine. """ requests = self._format() serps = self._fetch(requests) urls = self._process(serps) details = self._fetch(urls) emails = self._extract() return {'emails': emails, 'processed': len(self.data)}
[ "def", "search", "(", "self", ")", ":", "requests", "=", "self", ".", "_format", "(", ")", "serps", "=", "self", ".", "_fetch", "(", "requests", ")", "urls", "=", "self", ".", "_process", "(", "serps", ")", "details", "=", "self", ".", "_fetch", "(...
Run the full search process. Simple public method to abstract the steps needed to produce a full search using the engine.
[ "Run", "the", "full", "search", "process", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L96-L107
biocore/burrito
burrito/util.py
which
def which(executable_name, env_var='PATH'): """Equivalent to ``which executable_name`` in a *nix environment. Will return ``None`` if ``executable_name`` cannot be found in ``env_var`` or if ``env_var`` is not set. Otherwise will return the first match in ``env_var``. Note: this function will likely not work on Windows. Code taken and modified from: http://www.velocityreviews.com/forums/ t689526-python-library-call-equivalent-to-which-command.html """ exec_fp = None if env_var in os.environ: paths = os.environ[env_var] for path in paths.split(os.pathsep): curr_exec_fp = os.path.join(path, executable_name) if os.access(curr_exec_fp, os.X_OK): exec_fp = curr_exec_fp break return exec_fp
python
def which(executable_name, env_var='PATH'): """Equivalent to ``which executable_name`` in a *nix environment. Will return ``None`` if ``executable_name`` cannot be found in ``env_var`` or if ``env_var`` is not set. Otherwise will return the first match in ``env_var``. Note: this function will likely not work on Windows. Code taken and modified from: http://www.velocityreviews.com/forums/ t689526-python-library-call-equivalent-to-which-command.html """ exec_fp = None if env_var in os.environ: paths = os.environ[env_var] for path in paths.split(os.pathsep): curr_exec_fp = os.path.join(path, executable_name) if os.access(curr_exec_fp, os.X_OK): exec_fp = curr_exec_fp break return exec_fp
[ "def", "which", "(", "executable_name", ",", "env_var", "=", "'PATH'", ")", ":", "exec_fp", "=", "None", "if", "env_var", "in", "os", ".", "environ", ":", "paths", "=", "os", ".", "environ", "[", "env_var", "]", "for", "path", "in", "paths", ".", "sp...
Equivalent to ``which executable_name`` in a *nix environment. Will return ``None`` if ``executable_name`` cannot be found in ``env_var`` or if ``env_var`` is not set. Otherwise will return the first match in ``env_var``. Note: this function will likely not work on Windows. Code taken and modified from: http://www.velocityreviews.com/forums/ t689526-python-library-call-equivalent-to-which-command.html
[ "Equivalent", "to", "which", "executable_name", "in", "a", "*", "nix", "environment", "." ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L26-L52
biocore/burrito
burrito/util.py
cmdline_generator
def cmdline_generator(param_iter, PathToBin=None, PathToCmd=None, PathsToInputs=None, PathToOutput=None, PathToStderr='/dev/null', PathToStdout='/dev/null', UniqueOutputs=False, InputParam=None, OutputParam=None): """Generates command lines that can be used in a cluster environment param_iter : ParameterIterBase subclass instance PathToBin : Absolute location primary command (i.e. Python) PathToCmd : Absolute location of the command PathsToInputs : Absolute location(s) of input file(s) PathToOutput : Absolute location of output file PathToStderr : Path to stderr PathToStdout : Path to stdout UniqueOutputs : Generate unique tags for output files InputParam : Application input parameter (if not specified, assumes stdin is to be used) OutputParam : Application output parameter (if not specified, assumes stdout is to be used) """ # Make sure we have input(s) and output if not PathsToInputs: raise ValueError("No input file(s) specified.") if not PathToOutput: raise ValueError("No output file specified.") if not isinstance(PathsToInputs, list): PathsToInputs = [PathsToInputs] # PathToBin and PathToCmd can be blank if PathToBin is None: PathToBin = '' if PathToCmd is None: PathToCmd = '' # stdout_ and stderr_ do not have to be redirected if PathToStdout is None: stdout_ = '' else: stdout_ = '> "%s"' % PathToStdout if PathToStderr is None: stderr_ = '' else: stderr_ = '2> "%s"' % PathToStderr # Output can be redirected to stdout or specified output argument if OutputParam is None: output = '> "%s"' % PathToOutput stdout_ = '' else: output_param = param_iter.AppParams[OutputParam] output_param.on('"%s"' % PathToOutput) output = str(output_param) output_param.off() output_count = 0 base_command = ' '.join([PathToBin, PathToCmd]) for params in param_iter: # Support for multiple input files for inputfile in PathsToInputs: cmdline = [base_command] cmdline.extend(sorted(filter(None, map(str, params.values())))) # Input can come from stdin or specified input argument if 
InputParam is None: input = '< "%s"' % inputfile else: input_param = params[InputParam] input_param.on('"%s"' % inputfile) input = str(input_param) input_param.off() cmdline.append(input) if UniqueOutputs: cmdline.append(''.join([output, str(output_count)])) output_count += 1 else: cmdline.append(output) cmdline.append(stdout_) cmdline.append(stderr_) yield ' '.join(cmdline)
python
def cmdline_generator(param_iter, PathToBin=None, PathToCmd=None, PathsToInputs=None, PathToOutput=None, PathToStderr='/dev/null', PathToStdout='/dev/null', UniqueOutputs=False, InputParam=None, OutputParam=None): """Generates command lines that can be used in a cluster environment param_iter : ParameterIterBase subclass instance PathToBin : Absolute location primary command (i.e. Python) PathToCmd : Absolute location of the command PathsToInputs : Absolute location(s) of input file(s) PathToOutput : Absolute location of output file PathToStderr : Path to stderr PathToStdout : Path to stdout UniqueOutputs : Generate unique tags for output files InputParam : Application input parameter (if not specified, assumes stdin is to be used) OutputParam : Application output parameter (if not specified, assumes stdout is to be used) """ # Make sure we have input(s) and output if not PathsToInputs: raise ValueError("No input file(s) specified.") if not PathToOutput: raise ValueError("No output file specified.") if not isinstance(PathsToInputs, list): PathsToInputs = [PathsToInputs] # PathToBin and PathToCmd can be blank if PathToBin is None: PathToBin = '' if PathToCmd is None: PathToCmd = '' # stdout_ and stderr_ do not have to be redirected if PathToStdout is None: stdout_ = '' else: stdout_ = '> "%s"' % PathToStdout if PathToStderr is None: stderr_ = '' else: stderr_ = '2> "%s"' % PathToStderr # Output can be redirected to stdout or specified output argument if OutputParam is None: output = '> "%s"' % PathToOutput stdout_ = '' else: output_param = param_iter.AppParams[OutputParam] output_param.on('"%s"' % PathToOutput) output = str(output_param) output_param.off() output_count = 0 base_command = ' '.join([PathToBin, PathToCmd]) for params in param_iter: # Support for multiple input files for inputfile in PathsToInputs: cmdline = [base_command] cmdline.extend(sorted(filter(None, map(str, params.values())))) # Input can come from stdin or specified input argument if 
InputParam is None: input = '< "%s"' % inputfile else: input_param = params[InputParam] input_param.on('"%s"' % inputfile) input = str(input_param) input_param.off() cmdline.append(input) if UniqueOutputs: cmdline.append(''.join([output, str(output_count)])) output_count += 1 else: cmdline.append(output) cmdline.append(stdout_) cmdline.append(stderr_) yield ' '.join(cmdline)
[ "def", "cmdline_generator", "(", "param_iter", ",", "PathToBin", "=", "None", ",", "PathToCmd", "=", "None", ",", "PathsToInputs", "=", "None", ",", "PathToOutput", "=", "None", ",", "PathToStderr", "=", "'/dev/null'", ",", "PathToStdout", "=", "'/dev/null'", ...
Generates command lines that can be used in a cluster environment param_iter : ParameterIterBase subclass instance PathToBin : Absolute location primary command (i.e. Python) PathToCmd : Absolute location of the command PathsToInputs : Absolute location(s) of input file(s) PathToOutput : Absolute location of output file PathToStderr : Path to stderr PathToStdout : Path to stdout UniqueOutputs : Generate unique tags for output files InputParam : Application input parameter (if not specified, assumes stdin is to be used) OutputParam : Application output parameter (if not specified, assumes stdout is to be used)
[ "Generates", "command", "lines", "that", "can", "be", "used", "in", "a", "cluster", "environment" ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L660-L743
biocore/burrito
burrito/util.py
get_tmp_filename
def get_tmp_filename(tmp_dir=gettempdir(), prefix="tmp", suffix=".txt", result_constructor=FilePath): """ Generate a temporary filename and return as a FilePath object tmp_dir: the directory to house the tmp_filename prefix: string to append to beginning of filename Note: It is very useful to have prefix be descriptive of the process which is creating the temporary file. For example, if your temp file will be used to build a temporary blast database, you might pass prefix=TempBlastDB suffix: the suffix to be appended to the temp filename result_constructor: the constructor used to build the result filename (default: cogent.app.parameters.FilePath). Note that joining FilePath objects with one another or with strings, you must use the + operator. If this causes trouble, you can pass str as the the result_constructor. """ # check not none if not tmp_dir: tmp_dir = "" # if not current directory, append "/" if not already on path elif not tmp_dir.endswith("/"): tmp_dir += "/" chars = "abcdefghigklmnopqrstuvwxyz" picks = chars + chars.upper() + "0123456790" return result_constructor(tmp_dir) + result_constructor(prefix) +\ result_constructor("%s%s" % (''.join([choice(picks) for i in range(20)]), suffix))
python
def get_tmp_filename(tmp_dir=gettempdir(), prefix="tmp", suffix=".txt", result_constructor=FilePath): """ Generate a temporary filename and return as a FilePath object tmp_dir: the directory to house the tmp_filename prefix: string to append to beginning of filename Note: It is very useful to have prefix be descriptive of the process which is creating the temporary file. For example, if your temp file will be used to build a temporary blast database, you might pass prefix=TempBlastDB suffix: the suffix to be appended to the temp filename result_constructor: the constructor used to build the result filename (default: cogent.app.parameters.FilePath). Note that joining FilePath objects with one another or with strings, you must use the + operator. If this causes trouble, you can pass str as the the result_constructor. """ # check not none if not tmp_dir: tmp_dir = "" # if not current directory, append "/" if not already on path elif not tmp_dir.endswith("/"): tmp_dir += "/" chars = "abcdefghigklmnopqrstuvwxyz" picks = chars + chars.upper() + "0123456790" return result_constructor(tmp_dir) + result_constructor(prefix) +\ result_constructor("%s%s" % (''.join([choice(picks) for i in range(20)]), suffix))
[ "def", "get_tmp_filename", "(", "tmp_dir", "=", "gettempdir", "(", ")", ",", "prefix", "=", "\"tmp\"", ",", "suffix", "=", "\".txt\"", ",", "result_constructor", "=", "FilePath", ")", ":", "# check not none", "if", "not", "tmp_dir", ":", "tmp_dir", "=", "\"\...
Generate a temporary filename and return as a FilePath object tmp_dir: the directory to house the tmp_filename prefix: string to append to beginning of filename Note: It is very useful to have prefix be descriptive of the process which is creating the temporary file. For example, if your temp file will be used to build a temporary blast database, you might pass prefix=TempBlastDB suffix: the suffix to be appended to the temp filename result_constructor: the constructor used to build the result filename (default: cogent.app.parameters.FilePath). Note that joining FilePath objects with one another or with strings, you must use the + operator. If this causes trouble, you can pass str as the the result_constructor.
[ "Generate", "a", "temporary", "filename", "and", "return", "as", "a", "FilePath", "object" ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L746-L775
biocore/burrito
burrito/util.py
guess_input_handler
def guess_input_handler(seqs, add_seq_names=False): """Returns the name of the input handler for seqs.""" if isinstance(seqs, str): if '\n' in seqs: # can't be a filename... return '_input_as_multiline_string' else: # assume it was a filename return '_input_as_string' if isinstance(seqs, list) and len(seqs) and isinstance(seqs[0], tuple): return '_input_as_seq_id_seq_pairs' if add_seq_names: return '_input_as_seqs' return '_input_as_lines'
python
def guess_input_handler(seqs, add_seq_names=False): """Returns the name of the input handler for seqs.""" if isinstance(seqs, str): if '\n' in seqs: # can't be a filename... return '_input_as_multiline_string' else: # assume it was a filename return '_input_as_string' if isinstance(seqs, list) and len(seqs) and isinstance(seqs[0], tuple): return '_input_as_seq_id_seq_pairs' if add_seq_names: return '_input_as_seqs' return '_input_as_lines'
[ "def", "guess_input_handler", "(", "seqs", ",", "add_seq_names", "=", "False", ")", ":", "if", "isinstance", "(", "seqs", ",", "str", ")", ":", "if", "'\\n'", "in", "seqs", ":", "# can't be a filename...", "return", "'_input_as_multiline_string'", "else", ":", ...
Returns the name of the input handler for seqs.
[ "Returns", "the", "name", "of", "the", "input", "handler", "for", "seqs", "." ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L778-L792
biocore/burrito
burrito/util.py
CommandLineAppResult.cleanUp
def cleanUp(self): """ Delete files that are written by CommandLineApplication from disk WARNING: after cleanUp() you may still have access to part of your result data, but you should be aware that if the file size exceeds the size of the buffer you will only have part of the file. To be safe, you should not use cleanUp() until you are done with the file or have copied it to a different location. """ file_keys = self.file_keys for item in file_keys: if self[item] is not None: self[item].close() remove(self[item].name) # remove input handler temp files if hasattr(self, "_input_filename"): remove(self._input_filename)
python
def cleanUp(self): """ Delete files that are written by CommandLineApplication from disk WARNING: after cleanUp() you may still have access to part of your result data, but you should be aware that if the file size exceeds the size of the buffer you will only have part of the file. To be safe, you should not use cleanUp() until you are done with the file or have copied it to a different location. """ file_keys = self.file_keys for item in file_keys: if self[item] is not None: self[item].close() remove(self[item].name) # remove input handler temp files if hasattr(self, "_input_filename"): remove(self._input_filename)
[ "def", "cleanUp", "(", "self", ")", ":", "file_keys", "=", "self", ".", "file_keys", "for", "item", "in", "file_keys", ":", "if", "self", "[", "item", "]", "is", "not", "None", ":", "self", "[", "item", "]", ".", "close", "(", ")", "remove", "(", ...
Delete files that are written by CommandLineApplication from disk WARNING: after cleanUp() you may still have access to part of your result data, but you should be aware that if the file size exceeds the size of the buffer you will only have part of the file. To be safe, you should not use cleanUp() until you are done with the file or have copied it to a different location.
[ "Delete", "files", "that", "are", "written", "by", "CommandLineApplication", "from", "disk" ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L108-L126
biocore/burrito
burrito/util.py
CommandLineApplication._input_as_multiline_string
def _input_as_multiline_string(self, data): """Write a multiline string to a temp file and return the filename. data: a multiline string to be written to a file. * Note: the result will be the filename as a FilePath object (which is a string subclass). """ filename = self._input_filename = \ FilePath(self.getTmpFilename(self.TmpDir)) data_file = open(filename, 'w') data_file.write(data) data_file.close() return filename
python
def _input_as_multiline_string(self, data): """Write a multiline string to a temp file and return the filename. data: a multiline string to be written to a file. * Note: the result will be the filename as a FilePath object (which is a string subclass). """ filename = self._input_filename = \ FilePath(self.getTmpFilename(self.TmpDir)) data_file = open(filename, 'w') data_file.write(data) data_file.close() return filename
[ "def", "_input_as_multiline_string", "(", "self", ",", "data", ")", ":", "filename", "=", "self", ".", "_input_filename", "=", "FilePath", "(", "self", ".", "getTmpFilename", "(", "self", ".", "TmpDir", ")", ")", "data_file", "=", "open", "(", "filename", ...
Write a multiline string to a temp file and return the filename. data: a multiline string to be written to a file. * Note: the result will be the filename as a FilePath object (which is a string subclass).
[ "Write", "a", "multiline", "string", "to", "a", "temp", "file", "and", "return", "the", "filename", "." ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L332-L346
biocore/burrito
burrito/util.py
CommandLineApplication._input_as_lines
def _input_as_lines(self, data): """ Write a seq of lines to a temp file and return the filename string data: a sequence to be written to a file, each element of the sequence will compose a line in the file * Note: the result will be the filename as a FilePath object (which is a string subclass). * Note: '\n' will be stripped off the end of each sequence element before writing to a file in order to avoid multiple new lines accidentally be written to a file """ filename = self._input_filename = \ FilePath(self.getTmpFilename(self.TmpDir)) filename = FilePath(filename) data_file = open(filename, 'w') data_to_file = '\n'.join([str(d).strip('\n') for d in data]) data_file.write(data_to_file) data_file.close() return filename
python
def _input_as_lines(self, data): """ Write a seq of lines to a temp file and return the filename string data: a sequence to be written to a file, each element of the sequence will compose a line in the file * Note: the result will be the filename as a FilePath object (which is a string subclass). * Note: '\n' will be stripped off the end of each sequence element before writing to a file in order to avoid multiple new lines accidentally be written to a file """ filename = self._input_filename = \ FilePath(self.getTmpFilename(self.TmpDir)) filename = FilePath(filename) data_file = open(filename, 'w') data_to_file = '\n'.join([str(d).strip('\n') for d in data]) data_file.write(data_to_file) data_file.close() return filename
[ "def", "_input_as_lines", "(", "self", ",", "data", ")", ":", "filename", "=", "self", ".", "_input_filename", "=", "FilePath", "(", "self", ".", "getTmpFilename", "(", "self", ".", "TmpDir", ")", ")", "filename", "=", "FilePath", "(", "filename", ")", "...
Write a seq of lines to a temp file and return the filename string data: a sequence to be written to a file, each element of the sequence will compose a line in the file * Note: the result will be the filename as a FilePath object (which is a string subclass). * Note: '\n' will be stripped off the end of each sequence element before writing to a file in order to avoid multiple new lines accidentally be written to a file
[ "Write", "a", "seq", "of", "lines", "to", "a", "temp", "file", "and", "return", "the", "filename", "string" ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L348-L367
biocore/burrito
burrito/util.py
CommandLineApplication._input_as_paths
def _input_as_paths(self, data): """ Return data as a space delimited string with each path quoted data: paths or filenames, most likely as a list of strings """ return self._command_delimiter.join( map(str, map(self._input_as_path, data)))
python
def _input_as_paths(self, data): """ Return data as a space delimited string with each path quoted data: paths or filenames, most likely as a list of strings """ return self._command_delimiter.join( map(str, map(self._input_as_path, data)))
[ "def", "_input_as_paths", "(", "self", ",", "data", ")", ":", "return", "self", ".", "_command_delimiter", ".", "join", "(", "map", "(", "str", ",", "map", "(", "self", ".", "_input_as_path", ",", "data", ")", ")", ")" ]
Return data as a space delimited string with each path quoted data: paths or filenames, most likely as a list of strings
[ "Return", "data", "as", "a", "space", "delimited", "string", "with", "each", "path", "quoted" ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L380-L388
biocore/burrito
burrito/util.py
CommandLineApplication._absolute
def _absolute(self, path): """ Convert a filename to an absolute path """ path = FilePath(path) if isabs(path): return path else: # these are both Path objects, so joining with + is acceptable return self.WorkingDir + path
python
def _absolute(self, path): """ Convert a filename to an absolute path """ path = FilePath(path) if isabs(path): return path else: # these are both Path objects, so joining with + is acceptable return self.WorkingDir + path
[ "def", "_absolute", "(", "self", ",", "path", ")", ":", "path", "=", "FilePath", "(", "path", ")", "if", "isabs", "(", "path", ")", ":", "return", "path", "else", ":", "# these are both Path objects, so joining with + is acceptable", "return", "self", ".", "Wo...
Convert a filename to an absolute path
[ "Convert", "a", "filename", "to", "an", "absolute", "path" ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L390-L397
biocore/burrito
burrito/util.py
CommandLineApplication._get_base_command
def _get_base_command(self): """ Returns the full command string input_arg: the argument to the command which represents the input to the program, this will be a string, either representing input or a filename to get input from tI""" command_parts = [] # Append a change directory to the beginning of the command to change # to self.WorkingDir before running the command # WorkingDir should be in quotes -- filenames might contain spaces cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) if self._command is None: raise ApplicationError('_command has not been set.') command = self._command parameters = self.Parameters command_parts.append(cd_command) command_parts.append(command) command_parts.append(self._command_delimiter.join(filter( None, (map(str, parameters.values()))))) return self._command_delimiter.join(command_parts).strip()
python
def _get_base_command(self): """ Returns the full command string input_arg: the argument to the command which represents the input to the program, this will be a string, either representing input or a filename to get input from tI""" command_parts = [] # Append a change directory to the beginning of the command to change # to self.WorkingDir before running the command # WorkingDir should be in quotes -- filenames might contain spaces cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) if self._command is None: raise ApplicationError('_command has not been set.') command = self._command parameters = self.Parameters command_parts.append(cd_command) command_parts.append(command) command_parts.append(self._command_delimiter.join(filter( None, (map(str, parameters.values()))))) return self._command_delimiter.join(command_parts).strip()
[ "def", "_get_base_command", "(", "self", ")", ":", "command_parts", "=", "[", "]", "# Append a change directory to the beginning of the command to change", "# to self.WorkingDir before running the command", "# WorkingDir should be in quotes -- filenames might contain spaces", "cd_command",...
Returns the full command string input_arg: the argument to the command which represents the input to the program, this will be a string, either representing input or a filename to get input from tI
[ "Returns", "the", "full", "command", "string" ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L399-L421
biocore/burrito
burrito/util.py
CommandLineApplication._error_on_missing_application
def _error_on_missing_application(self, params): """ Raise an ApplicationNotFoundError if the app is not accessible This method checks in the system path (usually $PATH) or for the existence of self._command. If self._command is not found in either place, an ApplicationNotFoundError is raised to inform the user that the application they are trying to access is not available. This method should be overwritten when self._command does not represent the relevant executable (e.g., self._command = 'prog -a') or in more complex cases where the file to be executed may be passed as a parameter (e.g., with java jar files, where the jar file is passed to java via '-jar'). It can also be overwritten to by-pass testing for application presence by never raising an error. """ command = self._command # strip off " characters, in case we got a FilePath object found_in_path = which(command.strip('"')) is not None if not (exists(command) or found_in_path): raise ApplicationNotFoundError("Cannot find %s. Is it installed? " "Is it in your path?" % command)
python
def _error_on_missing_application(self, params): """ Raise an ApplicationNotFoundError if the app is not accessible This method checks in the system path (usually $PATH) or for the existence of self._command. If self._command is not found in either place, an ApplicationNotFoundError is raised to inform the user that the application they are trying to access is not available. This method should be overwritten when self._command does not represent the relevant executable (e.g., self._command = 'prog -a') or in more complex cases where the file to be executed may be passed as a parameter (e.g., with java jar files, where the jar file is passed to java via '-jar'). It can also be overwritten to by-pass testing for application presence by never raising an error. """ command = self._command # strip off " characters, in case we got a FilePath object found_in_path = which(command.strip('"')) is not None if not (exists(command) or found_in_path): raise ApplicationNotFoundError("Cannot find %s. Is it installed? " "Is it in your path?" % command)
[ "def", "_error_on_missing_application", "(", "self", ",", "params", ")", ":", "command", "=", "self", ".", "_command", "# strip off \" characters, in case we got a FilePath object", "found_in_path", "=", "which", "(", "command", ".", "strip", "(", "'\"'", ")", ")", ...
Raise an ApplicationNotFoundError if the app is not accessible This method checks in the system path (usually $PATH) or for the existence of self._command. If self._command is not found in either place, an ApplicationNotFoundError is raised to inform the user that the application they are trying to access is not available. This method should be overwritten when self._command does not represent the relevant executable (e.g., self._command = 'prog -a') or in more complex cases where the file to be executed may be passed as a parameter (e.g., with java jar files, where the jar file is passed to java via '-jar'). It can also be overwritten to by-pass testing for application presence by never raising an error.
[ "Raise", "an", "ApplicationNotFoundError", "if", "the", "app", "is", "not", "accessible" ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L447-L469
biocore/burrito
burrito/util.py
CommandLineApplication.getTmpFilename
def getTmpFilename(self, tmp_dir=None, prefix='tmp', suffix='.txt', include_class_id=False, result_constructor=FilePath): """ Return a temp filename tmp_dir: directory where temporary files will be stored prefix: text to append to start of file name suffix: text to append to end of file name include_class_id: if True, will append a class identifier (built from the class name) to the filename following prefix. This is False by default b/c there is some string processing overhead in getting the class name. This will probably be most useful for testing: if temp files are being left behind by tests, you can turn this on in here (temporarily) to find out which tests are leaving the temp files. result_constructor: the constructor used to build the result (default: cogent.app.parameters.FilePath). Note that joining FilePath objects with one another or with strings, you must use the + operator. If this causes trouble, you can pass str as the the result_constructor. """ # check not none if not tmp_dir: tmp_dir = self.TmpDir # if not current directory, append "/" if not already on path elif not tmp_dir.endswith("/"): tmp_dir += "/" if include_class_id: # Append the classname to the prefix from the class name # so any problematic temp files can be associated with # the class that created them. This should be especially # useful for testing, but is turned off by default to # avoid the string-parsing overhead. class_id = str(self.__class__()) prefix = ''.join([prefix, class_id[class_id.rindex('.') + 1: class_id.index(' ')]]) try: mkdir(tmp_dir) except OSError: # Directory already exists pass # note: it is OK to join FilePath objects with + return result_constructor(tmp_dir) + result_constructor(prefix) + \ result_constructor(''.join([choice(_all_chars) for i in range(self.TmpNameLen)])) +\ result_constructor(suffix)
python
def getTmpFilename(self, tmp_dir=None, prefix='tmp', suffix='.txt', include_class_id=False, result_constructor=FilePath): """ Return a temp filename tmp_dir: directory where temporary files will be stored prefix: text to append to start of file name suffix: text to append to end of file name include_class_id: if True, will append a class identifier (built from the class name) to the filename following prefix. This is False by default b/c there is some string processing overhead in getting the class name. This will probably be most useful for testing: if temp files are being left behind by tests, you can turn this on in here (temporarily) to find out which tests are leaving the temp files. result_constructor: the constructor used to build the result (default: cogent.app.parameters.FilePath). Note that joining FilePath objects with one another or with strings, you must use the + operator. If this causes trouble, you can pass str as the the result_constructor. """ # check not none if not tmp_dir: tmp_dir = self.TmpDir # if not current directory, append "/" if not already on path elif not tmp_dir.endswith("/"): tmp_dir += "/" if include_class_id: # Append the classname to the prefix from the class name # so any problematic temp files can be associated with # the class that created them. This should be especially # useful for testing, but is turned off by default to # avoid the string-parsing overhead. class_id = str(self.__class__()) prefix = ''.join([prefix, class_id[class_id.rindex('.') + 1: class_id.index(' ')]]) try: mkdir(tmp_dir) except OSError: # Directory already exists pass # note: it is OK to join FilePath objects with + return result_constructor(tmp_dir) + result_constructor(prefix) + \ result_constructor(''.join([choice(_all_chars) for i in range(self.TmpNameLen)])) +\ result_constructor(suffix)
[ "def", "getTmpFilename", "(", "self", ",", "tmp_dir", "=", "None", ",", "prefix", "=", "'tmp'", ",", "suffix", "=", "'.txt'", ",", "include_class_id", "=", "False", ",", "result_constructor", "=", "FilePath", ")", ":", "# check not none", "if", "not", "tmp_d...
Return a temp filename tmp_dir: directory where temporary files will be stored prefix: text to append to start of file name suffix: text to append to end of file name include_class_id: if True, will append a class identifier (built from the class name) to the filename following prefix. This is False by default b/c there is some string processing overhead in getting the class name. This will probably be most useful for testing: if temp files are being left behind by tests, you can turn this on in here (temporarily) to find out which tests are leaving the temp files. result_constructor: the constructor used to build the result (default: cogent.app.parameters.FilePath). Note that joining FilePath objects with one another or with strings, you must use the + operator. If this causes trouble, you can pass str as the the result_constructor.
[ "Return", "a", "temp", "filename" ]
train
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L500-L548
bitlabstudio/django-document-library
document_library/south_migrations/0005_init_slug.py
Migration.forwards
def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." for category in orm['document_library.DocumentCategory'].objects.all(): category.slug = category.documentcategorytitle_set.all()[0].title.lower() category.save()
python
def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." for category in orm['document_library.DocumentCategory'].objects.all(): category.slug = category.documentcategorytitle_set.all()[0].title.lower() category.save()
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "# Note: Remember to use orm['appname.ModelName'] rather than \"from appname.models...\"", "for", "category", "in", "orm", "[", "'document_library.DocumentCategory'", "]", ".", "objects", ".", "all", "(", ")", ":", "...
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
train
https://github.com/bitlabstudio/django-document-library/blob/508737277455f182e81780cfca8d8eceb989a45b/document_library/south_migrations/0005_init_slug.py#L24-L29
BoGoEngine/bogo-python
bogo/accent.py
get_accent_char
def get_accent_char(char): """ Get the accent of an single char, if any. """ index = utils.VOWELS.find(char.lower()) if (index != -1): return 5 - index % 6 else: return Accent.NONE
python
def get_accent_char(char): """ Get the accent of an single char, if any. """ index = utils.VOWELS.find(char.lower()) if (index != -1): return 5 - index % 6 else: return Accent.NONE
[ "def", "get_accent_char", "(", "char", ")", ":", "index", "=", "utils", ".", "VOWELS", ".", "find", "(", "char", ".", "lower", "(", ")", ")", "if", "(", "index", "!=", "-", "1", ")", ":", "return", "5", "-", "index", "%", "6", "else", ":", "ret...
Get the accent of an single char, if any.
[ "Get", "the", "accent", "of", "an", "single", "char", "if", "any", "." ]
train
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L45-L53
BoGoEngine/bogo-python
bogo/accent.py
get_accent_string
def get_accent_string(string): """ Get the first accent from the right of a string. """ accents = list(filter(lambda accent: accent != Accent.NONE, map(get_accent_char, string))) return accents[-1] if accents else Accent.NONE
python
def get_accent_string(string): """ Get the first accent from the right of a string. """ accents = list(filter(lambda accent: accent != Accent.NONE, map(get_accent_char, string))) return accents[-1] if accents else Accent.NONE
[ "def", "get_accent_string", "(", "string", ")", ":", "accents", "=", "list", "(", "filter", "(", "lambda", "accent", ":", "accent", "!=", "Accent", ".", "NONE", ",", "map", "(", "get_accent_char", ",", "string", ")", ")", ")", "return", "accents", "[", ...
Get the first accent from the right of a string.
[ "Get", "the", "first", "accent", "from", "the", "right", "of", "a", "string", "." ]
train
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L56-L62
BoGoEngine/bogo-python
bogo/accent.py
add_accent
def add_accent(components, accent): """ Add accent to the given components. The parameter components is the result of function separate() """ vowel = components[1] last_consonant = components[2] if accent == Accent.NONE: vowel = remove_accent_string(vowel) return [components[0], vowel, last_consonant] if vowel == "": return components #raw_string is a list, not a str object raw_string = remove_accent_string(vowel).lower() new_vowel = "" # Highest priority for ê and ơ index = max(raw_string.find("ê"), raw_string.find("ơ")) if index != -1: new_vowel = vowel[:index] + add_accent_char(vowel[index], accent) + vowel[index+1:] elif len(vowel) == 1 or (len(vowel) == 2 and last_consonant == ""): new_vowel = add_accent_char(vowel[0], accent) + vowel[1:] else: new_vowel = vowel[:1] + add_accent_char(vowel[1], accent) + vowel[2:] return [components[0], new_vowel, components[2]]
python
def add_accent(components, accent): """ Add accent to the given components. The parameter components is the result of function separate() """ vowel = components[1] last_consonant = components[2] if accent == Accent.NONE: vowel = remove_accent_string(vowel) return [components[0], vowel, last_consonant] if vowel == "": return components #raw_string is a list, not a str object raw_string = remove_accent_string(vowel).lower() new_vowel = "" # Highest priority for ê and ơ index = max(raw_string.find("ê"), raw_string.find("ơ")) if index != -1: new_vowel = vowel[:index] + add_accent_char(vowel[index], accent) + vowel[index+1:] elif len(vowel) == 1 or (len(vowel) == 2 and last_consonant == ""): new_vowel = add_accent_char(vowel[0], accent) + vowel[1:] else: new_vowel = vowel[:1] + add_accent_char(vowel[1], accent) + vowel[2:] return [components[0], new_vowel, components[2]]
[ "def", "add_accent", "(", "components", ",", "accent", ")", ":", "vowel", "=", "components", "[", "1", "]", "last_consonant", "=", "components", "[", "2", "]", "if", "accent", "==", "Accent", ".", "NONE", ":", "vowel", "=", "remove_accent_string", "(", "...
Add accent to the given components. The parameter components is the result of function separate()
[ "Add", "accent", "to", "the", "given", "components", ".", "The", "parameter", "components", "is", "the", "result", "of", "function", "separate", "()" ]
train
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L65-L89
BoGoEngine/bogo-python
bogo/accent.py
add_accent_char
def add_accent_char(char, accent): """ Add accent to a single char. Parameter accent is member of class Accent """ if char == "": return "" case = char.isupper() char = char.lower() index = utils.VOWELS.find(char) if (index != -1): index = index - index % 6 + 5 char = utils.VOWELS[index - accent] return utils.change_case(char, case)
python
def add_accent_char(char, accent): """ Add accent to a single char. Parameter accent is member of class Accent """ if char == "": return "" case = char.isupper() char = char.lower() index = utils.VOWELS.find(char) if (index != -1): index = index - index % 6 + 5 char = utils.VOWELS[index - accent] return utils.change_case(char, case)
[ "def", "add_accent_char", "(", "char", ",", "accent", ")", ":", "if", "char", "==", "\"\"", ":", "return", "\"\"", "case", "=", "char", ".", "isupper", "(", ")", "char", "=", "char", ".", "lower", "(", ")", "index", "=", "utils", ".", "VOWELS", "."...
Add accent to a single char. Parameter accent is member of class Accent
[ "Add", "accent", "to", "a", "single", "char", ".", "Parameter", "accent", "is", "member", "of", "class", "Accent" ]
train
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L92-L105
BoGoEngine/bogo-python
bogo/accent.py
add_accent_at
def add_accent_at(string, accent, index): """ Add mark to the index-th character of the given string. Return the new string after applying change. (unused) """ if index == -1: return string # Python can handle the case which index is out of range of given string return string[:index] + \ accent.accent.add_accent_char(string[index], accent) + \ string[index+1:]
python
def add_accent_at(string, accent, index): """ Add mark to the index-th character of the given string. Return the new string after applying change. (unused) """ if index == -1: return string # Python can handle the case which index is out of range of given string return string[:index] + \ accent.accent.add_accent_char(string[index], accent) + \ string[index+1:]
[ "def", "add_accent_at", "(", "string", ",", "accent", ",", "index", ")", ":", "if", "index", "==", "-", "1", ":", "return", "string", "# Python can handle the case which index is out of range of given string", "return", "string", "[", ":", "index", "]", "+", "acce...
Add mark to the index-th character of the given string. Return the new string after applying change. (unused)
[ "Add", "mark", "to", "the", "index", "-", "th", "character", "of", "the", "given", "string", ".", "Return", "the", "new", "string", "after", "applying", "change", ".", "(", "unused", ")" ]
train
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L108-L119
BoGoEngine/bogo-python
bogo/accent.py
remove_accent_string
def remove_accent_string(string): """ Remove all accent from a whole string. """ return utils.join([add_accent_char(c, Accent.NONE) for c in string])
python
def remove_accent_string(string): """ Remove all accent from a whole string. """ return utils.join([add_accent_char(c, Accent.NONE) for c in string])
[ "def", "remove_accent_string", "(", "string", ")", ":", "return", "utils", ".", "join", "(", "[", "add_accent_char", "(", "c", ",", "Accent", ".", "NONE", ")", "for", "c", "in", "string", "]", ")" ]
Remove all accent from a whole string.
[ "Remove", "all", "accent", "from", "a", "whole", "string", "." ]
train
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L129-L133
biocore/burrito-fillings
bfillings/rdp_classifier.py
parse_command_line_parameters
def parse_command_line_parameters(argv=None): """ Parses command line arguments """ usage =\ 'usage: %prog [options] input_sequences_filepath' version = 'Version: %prog ' + __version__ parser = OptionParser(usage=usage, version=version) parser.add_option('-o', '--output_fp', action='store', type='string', dest='output_fp', help='Path to store ' + 'output file [default: generated from input_sequences_filepath]') parser.add_option('-c', '--min_confidence', action='store', type='float', dest='min_confidence', help='minimum confidence ' + 'level to return a classification [default: %default]') parser.set_defaults(verbose=False, min_confidence=0.80) opts, args = parser.parse_args(argv) if len(args) != 1: parser.error('Exactly one argument is required.') return opts, args
python
def parse_command_line_parameters(argv=None): """ Parses command line arguments """ usage =\ 'usage: %prog [options] input_sequences_filepath' version = 'Version: %prog ' + __version__ parser = OptionParser(usage=usage, version=version) parser.add_option('-o', '--output_fp', action='store', type='string', dest='output_fp', help='Path to store ' + 'output file [default: generated from input_sequences_filepath]') parser.add_option('-c', '--min_confidence', action='store', type='float', dest='min_confidence', help='minimum confidence ' + 'level to return a classification [default: %default]') parser.set_defaults(verbose=False, min_confidence=0.80) opts, args = parser.parse_args(argv) if len(args) != 1: parser.error('Exactly one argument is required.') return opts, args
[ "def", "parse_command_line_parameters", "(", "argv", "=", "None", ")", ":", "usage", "=", "'usage: %prog [options] input_sequences_filepath'", "version", "=", "'Version: %prog '", "+", "__version__", "parser", "=", "OptionParser", "(", "usage", "=", "usage", ",", "ver...
Parses command line arguments
[ "Parses", "command", "line", "arguments" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L353-L374
biocore/burrito-fillings
bfillings/rdp_classifier.py
assign_taxonomy
def assign_taxonomy( data, min_confidence=0.80, output_fp=None, training_data_fp=None, fixrank=True, max_memory=None, tmp_dir=tempfile.gettempdir()): """Assign taxonomy to each sequence in data with the RDP classifier data: open fasta file object or list of fasta lines confidence: minimum support threshold to assign taxonomy to a sequence output_fp: path to write output; if not provided, result will be returned in a dict of {seq_id:(taxonomy_assignment,confidence)} """ # Going to iterate through this twice in succession, best to force # evaluation now data = list(data) # RDP classifier doesn't preserve identifiers with spaces # Use lookup table seq_id_lookup = {} for seq_id, seq in parse_fasta(data): seq_id_lookup[seq_id.split()[0]] = seq_id app_kwargs = {} if tmp_dir is not None: app_kwargs['TmpDir'] = tmp_dir app = RdpClassifier(**app_kwargs) if max_memory is not None: app.Parameters['-Xmx'].on(max_memory) temp_output_file = tempfile.NamedTemporaryFile( prefix='RdpAssignments_', suffix='.txt', dir=tmp_dir) app.Parameters['-o'].on(temp_output_file.name) if training_data_fp is not None: app.Parameters['-t'].on(training_data_fp) if fixrank: app.Parameters['-f'].on('fixrank') else: app.Parameters['-f'].on('allrank') app_result = app(data) assignments = {} # ShortSequenceException messages are written to stdout # Tag these ID's as unassignable for line in app_result['StdOut']: excep = parse_rdp_exception(line) if excep is not None: _, rdp_id = excep orig_id = seq_id_lookup[rdp_id] assignments[orig_id] = ('Unassignable', 1.0) for line in app_result['Assignments']: rdp_id, direction, taxa = parse_rdp_assignment(line) if taxa[0][0] == "Root": taxa = taxa[1:] orig_id = seq_id_lookup[rdp_id] lineage, confidence = get_rdp_lineage(taxa, min_confidence) if lineage: assignments[orig_id] = (';'.join(lineage), confidence) else: assignments[orig_id] = ('Unclassified', 1.0) if output_fp: try: output_file = open(output_fp, 'w') except OSError: raise OSError("Can't open output file 
for writing: %s" % output_fp) for seq_id, assignment in assignments.items(): lineage, confidence = assignment output_file.write( '%s\t%s\t%1.3f\n' % (seq_id, lineage, confidence)) output_file.close() return None else: return assignments
python
def assign_taxonomy( data, min_confidence=0.80, output_fp=None, training_data_fp=None, fixrank=True, max_memory=None, tmp_dir=tempfile.gettempdir()): """Assign taxonomy to each sequence in data with the RDP classifier data: open fasta file object or list of fasta lines confidence: minimum support threshold to assign taxonomy to a sequence output_fp: path to write output; if not provided, result will be returned in a dict of {seq_id:(taxonomy_assignment,confidence)} """ # Going to iterate through this twice in succession, best to force # evaluation now data = list(data) # RDP classifier doesn't preserve identifiers with spaces # Use lookup table seq_id_lookup = {} for seq_id, seq in parse_fasta(data): seq_id_lookup[seq_id.split()[0]] = seq_id app_kwargs = {} if tmp_dir is not None: app_kwargs['TmpDir'] = tmp_dir app = RdpClassifier(**app_kwargs) if max_memory is not None: app.Parameters['-Xmx'].on(max_memory) temp_output_file = tempfile.NamedTemporaryFile( prefix='RdpAssignments_', suffix='.txt', dir=tmp_dir) app.Parameters['-o'].on(temp_output_file.name) if training_data_fp is not None: app.Parameters['-t'].on(training_data_fp) if fixrank: app.Parameters['-f'].on('fixrank') else: app.Parameters['-f'].on('allrank') app_result = app(data) assignments = {} # ShortSequenceException messages are written to stdout # Tag these ID's as unassignable for line in app_result['StdOut']: excep = parse_rdp_exception(line) if excep is not None: _, rdp_id = excep orig_id = seq_id_lookup[rdp_id] assignments[orig_id] = ('Unassignable', 1.0) for line in app_result['Assignments']: rdp_id, direction, taxa = parse_rdp_assignment(line) if taxa[0][0] == "Root": taxa = taxa[1:] orig_id = seq_id_lookup[rdp_id] lineage, confidence = get_rdp_lineage(taxa, min_confidence) if lineage: assignments[orig_id] = (';'.join(lineage), confidence) else: assignments[orig_id] = ('Unclassified', 1.0) if output_fp: try: output_file = open(output_fp, 'w') except OSError: raise OSError("Can't open output file 
for writing: %s" % output_fp) for seq_id, assignment in assignments.items(): lineage, confidence = assignment output_file.write( '%s\t%s\t%1.3f\n' % (seq_id, lineage, confidence)) output_file.close() return None else: return assignments
[ "def", "assign_taxonomy", "(", "data", ",", "min_confidence", "=", "0.80", ",", "output_fp", "=", "None", ",", "training_data_fp", "=", "None", ",", "fixrank", "=", "True", ",", "max_memory", "=", "None", ",", "tmp_dir", "=", "tempfile", ".", "gettempdir", ...
Assign taxonomy to each sequence in data with the RDP classifier data: open fasta file object or list of fasta lines confidence: minimum support threshold to assign taxonomy to a sequence output_fp: path to write output; if not provided, result will be returned in a dict of {seq_id:(taxonomy_assignment,confidence)}
[ "Assign", "taxonomy", "to", "each", "sequence", "in", "data", "with", "the", "RDP", "classifier" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L377-L452
biocore/burrito-fillings
bfillings/rdp_classifier.py
train_rdp_classifier
def train_rdp_classifier( training_seqs_file, taxonomy_file, model_output_dir, max_memory=None, tmp_dir=tempfile.gettempdir()): """ Train RDP Classifier, saving to model_output_dir training_seqs_file, taxonomy_file: file-like objects used to train the RDP Classifier (see RdpTrainer documentation for format of training data) model_output_dir: directory in which to save the files necessary to classify sequences according to the training data Once the model data has been generated, the RDP Classifier may """ app_kwargs = {} if tmp_dir is not None: app_kwargs['TmpDir'] = tmp_dir app = RdpTrainer(**app_kwargs) if max_memory is not None: app.Parameters['-Xmx'].on(max_memory) temp_taxonomy_file = tempfile.NamedTemporaryFile( prefix='RdpTaxonomy_', suffix='.txt', dir=tmp_dir) temp_taxonomy_file.write(taxonomy_file.read()) temp_taxonomy_file.seek(0) app.Parameters['taxonomy_file'].on(temp_taxonomy_file.name) app.Parameters['model_output_dir'].on(model_output_dir) return app(training_seqs_file)
python
def train_rdp_classifier( training_seqs_file, taxonomy_file, model_output_dir, max_memory=None, tmp_dir=tempfile.gettempdir()): """ Train RDP Classifier, saving to model_output_dir training_seqs_file, taxonomy_file: file-like objects used to train the RDP Classifier (see RdpTrainer documentation for format of training data) model_output_dir: directory in which to save the files necessary to classify sequences according to the training data Once the model data has been generated, the RDP Classifier may """ app_kwargs = {} if tmp_dir is not None: app_kwargs['TmpDir'] = tmp_dir app = RdpTrainer(**app_kwargs) if max_memory is not None: app.Parameters['-Xmx'].on(max_memory) temp_taxonomy_file = tempfile.NamedTemporaryFile( prefix='RdpTaxonomy_', suffix='.txt', dir=tmp_dir) temp_taxonomy_file.write(taxonomy_file.read()) temp_taxonomy_file.seek(0) app.Parameters['taxonomy_file'].on(temp_taxonomy_file.name) app.Parameters['model_output_dir'].on(model_output_dir) return app(training_seqs_file)
[ "def", "train_rdp_classifier", "(", "training_seqs_file", ",", "taxonomy_file", ",", "model_output_dir", ",", "max_memory", "=", "None", ",", "tmp_dir", "=", "tempfile", ".", "gettempdir", "(", ")", ")", ":", "app_kwargs", "=", "{", "}", "if", "tmp_dir", "is",...
Train RDP Classifier, saving to model_output_dir training_seqs_file, taxonomy_file: file-like objects used to train the RDP Classifier (see RdpTrainer documentation for format of training data) model_output_dir: directory in which to save the files necessary to classify sequences according to the training data Once the model data has been generated, the RDP Classifier may
[ "Train", "RDP", "Classifier", "saving", "to", "model_output_dir" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L455-L485
biocore/burrito-fillings
bfillings/rdp_classifier.py
train_rdp_classifier_and_assign_taxonomy
def train_rdp_classifier_and_assign_taxonomy( training_seqs_file, taxonomy_file, seqs_to_classify, min_confidence=0.80, model_output_dir=None, classification_output_fp=None, max_memory=None, tmp_dir=tempfile.gettempdir()): """ Train RDP Classifier and assign taxonomy in one fell swoop The file objects training_seqs_file and taxonomy_file are used to train the RDP Classifier (see RdpTrainer documentation for details). Model data is stored in model_output_dir. If model_output_dir is not provided, a temporary directory is created and removed after classification. The sequences in seqs_to_classify are classified according to the model and filtered at the desired confidence level (default: 0.80). The results are saved to classification_output_fp if provided, otherwise a dict of {seq_id:(taxonomy_assignment,confidence)} is returned. """ if model_output_dir is None: training_dir = tempfile.mkdtemp(prefix='RdpTrainer_', dir=tmp_dir) else: training_dir = model_output_dir training_results = train_rdp_classifier( training_seqs_file, taxonomy_file, training_dir, max_memory=max_memory, tmp_dir=tmp_dir) training_data_fp = training_results['properties'].name assignment_results = assign_taxonomy( seqs_to_classify, min_confidence=min_confidence, output_fp=classification_output_fp, training_data_fp=training_data_fp, max_memory=max_memory, fixrank=False, tmp_dir=tmp_dir) if model_output_dir is None: # Forum user reported an error on the call to os.rmtree: # https://groups.google.com/d/topic/qiime-forum/MkNe7-JtSBw/discussion # We were not able to replicate the problem and fix it # properly. However, even if an error occurs, we would like # to return results, along with a warning. try: rmtree(training_dir) except OSError: msg = ( "Temporary training directory %s not removed" % training_dir) if os.path.isdir(training_dir): training_dir_files = os.listdir(training_dir) msg += "\nDetected files %s" % training_dir_files warnings.warn(msg, RuntimeWarning) return assignment_results
python
def train_rdp_classifier_and_assign_taxonomy( training_seqs_file, taxonomy_file, seqs_to_classify, min_confidence=0.80, model_output_dir=None, classification_output_fp=None, max_memory=None, tmp_dir=tempfile.gettempdir()): """ Train RDP Classifier and assign taxonomy in one fell swoop The file objects training_seqs_file and taxonomy_file are used to train the RDP Classifier (see RdpTrainer documentation for details). Model data is stored in model_output_dir. If model_output_dir is not provided, a temporary directory is created and removed after classification. The sequences in seqs_to_classify are classified according to the model and filtered at the desired confidence level (default: 0.80). The results are saved to classification_output_fp if provided, otherwise a dict of {seq_id:(taxonomy_assignment,confidence)} is returned. """ if model_output_dir is None: training_dir = tempfile.mkdtemp(prefix='RdpTrainer_', dir=tmp_dir) else: training_dir = model_output_dir training_results = train_rdp_classifier( training_seqs_file, taxonomy_file, training_dir, max_memory=max_memory, tmp_dir=tmp_dir) training_data_fp = training_results['properties'].name assignment_results = assign_taxonomy( seqs_to_classify, min_confidence=min_confidence, output_fp=classification_output_fp, training_data_fp=training_data_fp, max_memory=max_memory, fixrank=False, tmp_dir=tmp_dir) if model_output_dir is None: # Forum user reported an error on the call to os.rmtree: # https://groups.google.com/d/topic/qiime-forum/MkNe7-JtSBw/discussion # We were not able to replicate the problem and fix it # properly. However, even if an error occurs, we would like # to return results, along with a warning. try: rmtree(training_dir) except OSError: msg = ( "Temporary training directory %s not removed" % training_dir) if os.path.isdir(training_dir): training_dir_files = os.listdir(training_dir) msg += "\nDetected files %s" % training_dir_files warnings.warn(msg, RuntimeWarning) return assignment_results
[ "def", "train_rdp_classifier_and_assign_taxonomy", "(", "training_seqs_file", ",", "taxonomy_file", ",", "seqs_to_classify", ",", "min_confidence", "=", "0.80", ",", "model_output_dir", "=", "None", ",", "classification_output_fp", "=", "None", ",", "max_memory", "=", "...
Train RDP Classifier and assign taxonomy in one fell swoop The file objects training_seqs_file and taxonomy_file are used to train the RDP Classifier (see RdpTrainer documentation for details). Model data is stored in model_output_dir. If model_output_dir is not provided, a temporary directory is created and removed after classification. The sequences in seqs_to_classify are classified according to the model and filtered at the desired confidence level (default: 0.80). The results are saved to classification_output_fp if provided, otherwise a dict of {seq_id:(taxonomy_assignment,confidence)} is returned.
[ "Train", "RDP", "Classifier", "and", "assign", "taxonomy", "in", "one", "fell", "swoop" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L488-L539
biocore/burrito-fillings
bfillings/rdp_classifier.py
parse_rdp_assignment
def parse_rdp_assignment(line): """Returns a list of assigned taxa from an RDP classification line """ toks = line.strip().split('\t') seq_id = toks.pop(0) direction = toks.pop(0) if ((len(toks) % 3) != 0): raise ValueError( "Expected assignments in a repeating series of (rank, name, " "confidence), received %s" % toks) assignments = [] # Fancy way to create list of triples using consecutive items from # input. See grouper function in documentation for itertools for # more general example. itoks = iter(toks) for taxon, rank, confidence_str in zip(itoks, itoks, itoks): if not taxon: continue assignments.append((taxon.strip('"'), rank, float(confidence_str))) return seq_id, direction, assignments
python
def parse_rdp_assignment(line): """Returns a list of assigned taxa from an RDP classification line """ toks = line.strip().split('\t') seq_id = toks.pop(0) direction = toks.pop(0) if ((len(toks) % 3) != 0): raise ValueError( "Expected assignments in a repeating series of (rank, name, " "confidence), received %s" % toks) assignments = [] # Fancy way to create list of triples using consecutive items from # input. See grouper function in documentation for itertools for # more general example. itoks = iter(toks) for taxon, rank, confidence_str in zip(itoks, itoks, itoks): if not taxon: continue assignments.append((taxon.strip('"'), rank, float(confidence_str))) return seq_id, direction, assignments
[ "def", "parse_rdp_assignment", "(", "line", ")", ":", "toks", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "seq_id", "=", "toks", ".", "pop", "(", "0", ")", "direction", "=", "toks", ".", "pop", "(", "0", ")", "if", "(", ...
Returns a list of assigned taxa from an RDP classification line
[ "Returns", "a", "list", "of", "assigned", "taxa", "from", "an", "RDP", "classification", "line" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L563-L582
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpClassifier._error_on_missing_application
def _error_on_missing_application(self, params): """Raise an ApplicationNotFoundError if the app is not accessible In this case, checks for the java runtime and the RDP jar file. """ if not (os.path.exists('java') or which('java')): raise ApplicationNotFoundError( "Cannot find java runtime. Is it installed? Is it in your " "path?") jar_fp = self._get_jar_fp() if jar_fp is None: raise ApplicationNotFoundError( "JAR file not found in current directory and the RDP_JAR_PATH " "environment variable is not set. Please set RDP_JAR_PATH to " "the full pathname of the JAR file.") if not os.path.exists(jar_fp): raise ApplicationNotFoundError( "JAR file %s does not exist." % jar_fp)
python
def _error_on_missing_application(self, params): """Raise an ApplicationNotFoundError if the app is not accessible In this case, checks for the java runtime and the RDP jar file. """ if not (os.path.exists('java') or which('java')): raise ApplicationNotFoundError( "Cannot find java runtime. Is it installed? Is it in your " "path?") jar_fp = self._get_jar_fp() if jar_fp is None: raise ApplicationNotFoundError( "JAR file not found in current directory and the RDP_JAR_PATH " "environment variable is not set. Please set RDP_JAR_PATH to " "the full pathname of the JAR file.") if not os.path.exists(jar_fp): raise ApplicationNotFoundError( "JAR file %s does not exist." % jar_fp)
[ "def", "_error_on_missing_application", "(", "self", ",", "params", ")", ":", "if", "not", "(", "os", ".", "path", ".", "exists", "(", "'java'", ")", "or", "which", "(", "'java'", ")", ")", ":", "raise", "ApplicationNotFoundError", "(", "\"Cannot find java r...
Raise an ApplicationNotFoundError if the app is not accessible In this case, checks for the java runtime and the RDP jar file.
[ "Raise", "an", "ApplicationNotFoundError", "if", "the", "app", "is", "not", "accessible" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L131-L148
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpClassifier._get_jar_fp
def _get_jar_fp(self): """Returns the full path to the JAR file. If the JAR file cannot be found in the current directory and the environment variable RDP_JAR_PATH is not set, returns None. """ # handles case where the jar file is in the current working directory if os.path.exists(self._command): return self._command # handles the case where the user has specified the location via # an environment variable elif 'RDP_JAR_PATH' in environ: return getenv('RDP_JAR_PATH') else: return None
python
def _get_jar_fp(self): """Returns the full path to the JAR file. If the JAR file cannot be found in the current directory and the environment variable RDP_JAR_PATH is not set, returns None. """ # handles case where the jar file is in the current working directory if os.path.exists(self._command): return self._command # handles the case where the user has specified the location via # an environment variable elif 'RDP_JAR_PATH' in environ: return getenv('RDP_JAR_PATH') else: return None
[ "def", "_get_jar_fp", "(", "self", ")", ":", "# handles case where the jar file is in the current working directory", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_command", ")", ":", "return", "self", ".", "_command", "# handles the case where the user ha...
Returns the full path to the JAR file. If the JAR file cannot be found in the current directory and the environment variable RDP_JAR_PATH is not set, returns None.
[ "Returns", "the", "full", "path", "to", "the", "JAR", "file", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L150-L165
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpClassifier._get_base_command
def _get_base_command(self): """Returns the base command plus command-line options. Does not include input file, output file, and training set. """ cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) jvm_command = "java" jvm_arguments = self._commandline_join( [self.Parameters[k] for k in self._jvm_parameters]) jar_arguments = '-jar "%s"' % self._get_jar_fp() rdp_arguments = self._commandline_join( [self.Parameters[k] for k in self._options]) command_parts = [ cd_command, jvm_command, jvm_arguments, jar_arguments, rdp_arguments, '-q'] return self._commandline_join(command_parts).strip()
python
def _get_base_command(self): """Returns the base command plus command-line options. Does not include input file, output file, and training set. """ cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) jvm_command = "java" jvm_arguments = self._commandline_join( [self.Parameters[k] for k in self._jvm_parameters]) jar_arguments = '-jar "%s"' % self._get_jar_fp() rdp_arguments = self._commandline_join( [self.Parameters[k] for k in self._options]) command_parts = [ cd_command, jvm_command, jvm_arguments, jar_arguments, rdp_arguments, '-q'] return self._commandline_join(command_parts).strip()
[ "def", "_get_base_command", "(", "self", ")", ":", "cd_command", "=", "''", ".", "join", "(", "[", "'cd '", ",", "str", "(", "self", ".", "WorkingDir", ")", ",", "';'", "]", ")", "jvm_command", "=", "\"java\"", "jvm_arguments", "=", "self", ".", "_comm...
Returns the base command plus command-line options. Does not include input file, output file, and training set.
[ "Returns", "the", "base", "command", "plus", "command", "-", "line", "options", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L168-L184
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpClassifier._commandline_join
def _commandline_join(self, tokens): """Formats a list of tokens as a shell command This seems to be a repeated pattern; may be useful in superclass. """ commands = filter(None, map(str, tokens)) return self._command_delimiter.join(commands).strip()
python
def _commandline_join(self, tokens): """Formats a list of tokens as a shell command This seems to be a repeated pattern; may be useful in superclass. """ commands = filter(None, map(str, tokens)) return self._command_delimiter.join(commands).strip()
[ "def", "_commandline_join", "(", "self", ",", "tokens", ")", ":", "commands", "=", "filter", "(", "None", ",", "map", "(", "str", ",", "tokens", ")", ")", "return", "self", ".", "_command_delimiter", ".", "join", "(", "commands", ")", ".", "strip", "("...
Formats a list of tokens as a shell command This seems to be a repeated pattern; may be useful in superclass.
[ "Formats", "a", "list", "of", "tokens", "as", "a", "shell", "command" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L188-L195
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpClassifier._get_result_paths
def _get_result_paths(self, data): """ Return a dict of ResultPath objects representing all possible output """ assignment_fp = str(self.Parameters['-o'].Value).strip('"') if not os.path.isabs(assignment_fp): assignment_fp = os.path.relpath(assignment_fp, self.WorkingDir) return {'Assignments': ResultPath(assignment_fp, IsWritten=True)}
python
def _get_result_paths(self, data): """ Return a dict of ResultPath objects representing all possible output """ assignment_fp = str(self.Parameters['-o'].Value).strip('"') if not os.path.isabs(assignment_fp): assignment_fp = os.path.relpath(assignment_fp, self.WorkingDir) return {'Assignments': ResultPath(assignment_fp, IsWritten=True)}
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "assignment_fp", "=", "str", "(", "self", ".", "Parameters", "[", "'-o'", "]", ".", "Value", ")", ".", "strip", "(", "'\"'", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "as...
Return a dict of ResultPath objects representing all possible output
[ "Return", "a", "dict", "of", "ResultPath", "objects", "representing", "all", "possible", "output" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L197-L203
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpTrainer._get_base_command
def _get_base_command(self): """Returns the base command plus command-line options. Handles everything up to and including the classpath. The positional training parameters are added by the _input_handler_decorator method. """ cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) jvm_command = "java" jvm_args = self._commandline_join( [self.Parameters[k] for k in self._jvm_parameters]) cp_args = '-cp "%s" %s' % (self._get_jar_fp(), self.TrainingClass) command_parts = [cd_command, jvm_command, jvm_args, cp_args] return self._commandline_join(command_parts).strip()
python
def _get_base_command(self): """Returns the base command plus command-line options. Handles everything up to and including the classpath. The positional training parameters are added by the _input_handler_decorator method. """ cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) jvm_command = "java" jvm_args = self._commandline_join( [self.Parameters[k] for k in self._jvm_parameters]) cp_args = '-cp "%s" %s' % (self._get_jar_fp(), self.TrainingClass) command_parts = [cd_command, jvm_command, jvm_args, cp_args] return self._commandline_join(command_parts).strip()
[ "def", "_get_base_command", "(", "self", ")", ":", "cd_command", "=", "''", ".", "join", "(", "[", "'cd '", ",", "str", "(", "self", ".", "WorkingDir", ")", ",", "';'", "]", ")", "jvm_command", "=", "\"java\"", "jvm_args", "=", "self", ".", "_commandli...
Returns the base command plus command-line options. Handles everything up to and including the classpath. The positional training parameters are added by the _input_handler_decorator method.
[ "Returns", "the", "base", "command", "plus", "command", "-", "line", "options", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L224-L238
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpTrainer.ModelDir
def ModelDir(self): """Absolute FilePath to the training output directory. """ model_dir = self.Parameters['model_output_dir'].Value absolute_model_dir = os.path.abspath(model_dir) return FilePath(absolute_model_dir)
python
def ModelDir(self): """Absolute FilePath to the training output directory. """ model_dir = self.Parameters['model_output_dir'].Value absolute_model_dir = os.path.abspath(model_dir) return FilePath(absolute_model_dir)
[ "def", "ModelDir", "(", "self", ")", ":", "model_dir", "=", "self", ".", "Parameters", "[", "'model_output_dir'", "]", ".", "Value", "absolute_model_dir", "=", "os", ".", "path", ".", "abspath", "(", "model_dir", ")", "return", "FilePath", "(", "absolute_mod...
Absolute FilePath to the training output directory.
[ "Absolute", "FilePath", "to", "the", "training", "output", "directory", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L255-L260
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpTrainer._input_handler_decorator
def _input_handler_decorator(self, data): """Adds positional parameters to selected input_handler's results. """ input_handler = getattr(self, self.__InputHandler) input_parts = [ self.Parameters['taxonomy_file'], input_handler(data), self.Parameters['training_set_id'], self.Parameters['taxonomy_version'], self.Parameters['modification_info'], self.ModelDir, ] return self._commandline_join(input_parts)
python
def _input_handler_decorator(self, data): """Adds positional parameters to selected input_handler's results. """ input_handler = getattr(self, self.__InputHandler) input_parts = [ self.Parameters['taxonomy_file'], input_handler(data), self.Parameters['training_set_id'], self.Parameters['taxonomy_version'], self.Parameters['modification_info'], self.ModelDir, ] return self._commandline_join(input_parts)
[ "def", "_input_handler_decorator", "(", "self", ",", "data", ")", ":", "input_handler", "=", "getattr", "(", "self", ",", "self", ".", "__InputHandler", ")", "input_parts", "=", "[", "self", ".", "Parameters", "[", "'taxonomy_file'", "]", ",", "input_handler",...
Adds positional parameters to selected input_handler's results.
[ "Adds", "positional", "parameters", "to", "selected", "input_handler", "s", "results", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L262-L274
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpTrainer._get_result_paths
def _get_result_paths(self, output_dir): """Return a dict of output files. """ # Only include the properties file here. Add the other result # paths in the __call__ method, so we can catch errors if an # output file is not written. self._write_properties_file() properties_fp = os.path.join(self.ModelDir, self.PropertiesFile) result_paths = { 'properties': ResultPath(properties_fp, IsWritten=True,) } return result_paths
python
def _get_result_paths(self, output_dir): """Return a dict of output files. """ # Only include the properties file here. Add the other result # paths in the __call__ method, so we can catch errors if an # output file is not written. self._write_properties_file() properties_fp = os.path.join(self.ModelDir, self.PropertiesFile) result_paths = { 'properties': ResultPath(properties_fp, IsWritten=True,) } return result_paths
[ "def", "_get_result_paths", "(", "self", ",", "output_dir", ")", ":", "# Only include the properties file here. Add the other result", "# paths in the __call__ method, so we can catch errors if an", "# output file is not written.", "self", ".", "_write_properties_file", "(", ")", "pr...
Return a dict of output files.
[ "Return", "a", "dict", "of", "output", "files", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L276-L287
biocore/burrito-fillings
bfillings/rdp_classifier.py
RdpTrainer._write_properties_file
def _write_properties_file(self): """Write an RDP training properties file manually. """ # The properties file specifies the names of the files in the # training directory. We use the example properties file # directly from the rdp_classifier distribution, which lists # the default set of files created by the application. We # must write this file manually after generating the # training data. properties_fp = os.path.join(self.ModelDir, self.PropertiesFile) properties_file = open(properties_fp, 'w') properties_file.write( "# Sample ResourceBundle properties file\n" "bergeyTree=bergeyTrainingTree.xml\n" "probabilityList=genus_wordConditionalProbList.txt\n" "probabilityIndex=wordConditionalProbIndexArr.txt\n" "wordPrior=logWordPrior.txt\n" "classifierVersion=Naive Bayesian rRNA Classifier Version 1.0, " "November 2003\n" ) properties_file.close()
python
def _write_properties_file(self): """Write an RDP training properties file manually. """ # The properties file specifies the names of the files in the # training directory. We use the example properties file # directly from the rdp_classifier distribution, which lists # the default set of files created by the application. We # must write this file manually after generating the # training data. properties_fp = os.path.join(self.ModelDir, self.PropertiesFile) properties_file = open(properties_fp, 'w') properties_file.write( "# Sample ResourceBundle properties file\n" "bergeyTree=bergeyTrainingTree.xml\n" "probabilityList=genus_wordConditionalProbList.txt\n" "probabilityIndex=wordConditionalProbIndexArr.txt\n" "wordPrior=logWordPrior.txt\n" "classifierVersion=Naive Bayesian rRNA Classifier Version 1.0, " "November 2003\n" ) properties_file.close()
[ "def", "_write_properties_file", "(", "self", ")", ":", "# The properties file specifies the names of the files in the", "# training directory. We use the example properties file", "# directly from the rdp_classifier distribution, which lists", "# the default set of files created by the applicati...
Write an RDP training properties file manually.
[ "Write", "an", "RDP", "training", "properties", "file", "manually", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/rdp_classifier.py#L289-L309
gmr/infoblox
infoblox/cli.py
InfobloxHost.delete_old_host
def delete_old_host(self, hostname): """Remove all records for the host. :param str hostname: Hostname to remove :rtype: bool """ host = Host(self.session, name=hostname) return host.delete()
python
def delete_old_host(self, hostname): """Remove all records for the host. :param str hostname: Hostname to remove :rtype: bool """ host = Host(self.session, name=hostname) return host.delete()
[ "def", "delete_old_host", "(", "self", ",", "hostname", ")", ":", "host", "=", "Host", "(", "self", ".", "session", ",", "name", "=", "hostname", ")", "return", "host", ".", "delete", "(", ")" ]
Remove all records for the host. :param str hostname: Hostname to remove :rtype: bool
[ "Remove", "all", "records", "for", "the", "host", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/cli.py#L41-L49
gmr/infoblox
infoblox/cli.py
InfobloxHost.add_new_host
def add_new_host(self, hostname, ipv4addr, comment=None): """Add or update a host in the infoblox, overwriting any IP address entries. :param str hostname: Hostname to add/set :param str ipv4addr: IP Address to add/set :param str comment: The comment for the record """ host = Host(self.session, name=hostname) if host.ipv4addrs: host.ipv4addrs = [] host.add_ipv4addr(ipv4addr) host.comment = comment return host.save()
python
def add_new_host(self, hostname, ipv4addr, comment=None): """Add or update a host in the infoblox, overwriting any IP address entries. :param str hostname: Hostname to add/set :param str ipv4addr: IP Address to add/set :param str comment: The comment for the record """ host = Host(self.session, name=hostname) if host.ipv4addrs: host.ipv4addrs = [] host.add_ipv4addr(ipv4addr) host.comment = comment return host.save()
[ "def", "add_new_host", "(", "self", ",", "hostname", ",", "ipv4addr", ",", "comment", "=", "None", ")", ":", "host", "=", "Host", "(", "self", ".", "session", ",", "name", "=", "hostname", ")", "if", "host", ".", "ipv4addrs", ":", "host", ".", "ipv4a...
Add or update a host in the infoblox, overwriting any IP address entries. :param str hostname: Hostname to add/set :param str ipv4addr: IP Address to add/set :param str comment: The comment for the record
[ "Add", "or", "update", "a", "host", "in", "the", "infoblox", "overwriting", "any", "IP", "address", "entries", "." ]
train
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/cli.py#L51-L65
biocore/burrito-fillings
bfillings/mafft_v7.py
align_unaligned_seqs
def align_unaligned_seqs(seqs_fp, moltype=DNA, params=None, accurate=False): """Aligns unaligned sequences Parameters ---------- seqs_fp : string file path of the input fasta file moltype : {skbio.DNA, skbio.RNA, skbio.Protein} params : dict-like type It pass the additional parameter settings to the application. Default is None. accurate : boolean Perform accurate alignment or not. It will sacrifice performance if set to True. Default is False. Returns ------- Alignment object The aligned sequences. See Also -------- skbio.Alignment skbio.DNA skbio.RNA skbio.Protein """ # Create Mafft app. app = Mafft(InputHandler='_input_as_path', params=params) # Turn on correct sequence type app.Parameters[MOLTYPE_MAP[moltype]].on() # Do not report progress app.Parameters['--quiet'].on() # More accurate alignment, sacrificing performance. if accurate: app.Parameters['--globalpair'].on() app.Parameters['--maxiterate'].Value = 1000 # Get results using int_map as input to app res = app(seqs_fp) # Get alignment as dict out of results alignment = Alignment.read(res['StdOut'], constructor=moltype) # Clean up res.cleanUp() return alignment
python
def align_unaligned_seqs(seqs_fp, moltype=DNA, params=None, accurate=False): """Aligns unaligned sequences Parameters ---------- seqs_fp : string file path of the input fasta file moltype : {skbio.DNA, skbio.RNA, skbio.Protein} params : dict-like type It pass the additional parameter settings to the application. Default is None. accurate : boolean Perform accurate alignment or not. It will sacrifice performance if set to True. Default is False. Returns ------- Alignment object The aligned sequences. See Also -------- skbio.Alignment skbio.DNA skbio.RNA skbio.Protein """ # Create Mafft app. app = Mafft(InputHandler='_input_as_path', params=params) # Turn on correct sequence type app.Parameters[MOLTYPE_MAP[moltype]].on() # Do not report progress app.Parameters['--quiet'].on() # More accurate alignment, sacrificing performance. if accurate: app.Parameters['--globalpair'].on() app.Parameters['--maxiterate'].Value = 1000 # Get results using int_map as input to app res = app(seqs_fp) # Get alignment as dict out of results alignment = Alignment.read(res['StdOut'], constructor=moltype) # Clean up res.cleanUp() return alignment
[ "def", "align_unaligned_seqs", "(", "seqs_fp", ",", "moltype", "=", "DNA", ",", "params", "=", "None", ",", "accurate", "=", "False", ")", ":", "# Create Mafft app.", "app", "=", "Mafft", "(", "InputHandler", "=", "'_input_as_path'", ",", "params", "=", "par...
Aligns unaligned sequences Parameters ---------- seqs_fp : string file path of the input fasta file moltype : {skbio.DNA, skbio.RNA, skbio.Protein} params : dict-like type It pass the additional parameter settings to the application. Default is None. accurate : boolean Perform accurate alignment or not. It will sacrifice performance if set to True. Default is False. Returns ------- Alignment object The aligned sequences. See Also -------- skbio.Alignment skbio.DNA skbio.RNA skbio.Protein
[ "Aligns", "unaligned", "sequences" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/mafft_v7.py#L250-L300
biocore/burrito-fillings
bfillings/mafft_v7.py
add_seqs_to_alignment
def add_seqs_to_alignment(seqs_fp, aln_fp, moltype=DNA, params=None, accurate=False): """Returns an Alignment object from seqs and existing Alignment. The "--seed" option can be used for adding unaligned sequences into a highly reliable alignment (seed) consisting of a small number of sequences. Parameters ---------- seqs_fp : string file path of the unaligned sequences aln_fp : string file path of the seed alignment params : dict of parameters to pass in to the Mafft app controller. Returns ------- The aligned sequences. The seq in the seed alignment will have "_seed_" prefixed to their seq id. """ if params is None: params = {'--seed': aln_fp} else: params['--seed'] = aln_fp return align_unaligned_seqs(seqs_fp, moltype, params, accurate)
python
def add_seqs_to_alignment(seqs_fp, aln_fp, moltype=DNA, params=None, accurate=False): """Returns an Alignment object from seqs and existing Alignment. The "--seed" option can be used for adding unaligned sequences into a highly reliable alignment (seed) consisting of a small number of sequences. Parameters ---------- seqs_fp : string file path of the unaligned sequences aln_fp : string file path of the seed alignment params : dict of parameters to pass in to the Mafft app controller. Returns ------- The aligned sequences. The seq in the seed alignment will have "_seed_" prefixed to their seq id. """ if params is None: params = {'--seed': aln_fp} else: params['--seed'] = aln_fp return align_unaligned_seqs(seqs_fp, moltype, params, accurate)
[ "def", "add_seqs_to_alignment", "(", "seqs_fp", ",", "aln_fp", ",", "moltype", "=", "DNA", ",", "params", "=", "None", ",", "accurate", "=", "False", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "'--seed'", ":", "aln_fp", "}", "else...
Returns an Alignment object from seqs and existing Alignment. The "--seed" option can be used for adding unaligned sequences into a highly reliable alignment (seed) consisting of a small number of sequences. Parameters ---------- seqs_fp : string file path of the unaligned sequences aln_fp : string file path of the seed alignment params : dict of parameters to pass in to the Mafft app controller. Returns ------- The aligned sequences. The seq in the seed alignment will have "_seed_" prefixed to their seq id.
[ "Returns", "an", "Alignment", "object", "from", "seqs", "and", "existing", "Alignment", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/mafft_v7.py#L303-L329
biocore/burrito-fillings
bfillings/mafft_v7.py
align_two_alignments
def align_two_alignments(aln1_fp, aln2_fp, moltype, params=None): """Returns an Alignment object from two existing Alignments. Parameters ---------- aln1_fp : string file path of 1st alignment aln2_fp : string file path of 2nd alignment params : dict of parameters to pass in to the Mafft app controller. Returns ------- The aligned sequences. """ # Create Mafft app. app = Mafft(InputHandler='_input_as_paths', params=params, SuppressStderr=False) app._command = 'mafft-profile' # Get results using int_map as input to app res = app([aln1_fp, aln2_fp]) return Alignment.read(res['StdOut'], constructor=moltype)
python
def align_two_alignments(aln1_fp, aln2_fp, moltype, params=None): """Returns an Alignment object from two existing Alignments. Parameters ---------- aln1_fp : string file path of 1st alignment aln2_fp : string file path of 2nd alignment params : dict of parameters to pass in to the Mafft app controller. Returns ------- The aligned sequences. """ # Create Mafft app. app = Mafft(InputHandler='_input_as_paths', params=params, SuppressStderr=False) app._command = 'mafft-profile' # Get results using int_map as input to app res = app([aln1_fp, aln2_fp]) return Alignment.read(res['StdOut'], constructor=moltype)
[ "def", "align_two_alignments", "(", "aln1_fp", ",", "aln2_fp", ",", "moltype", ",", "params", "=", "None", ")", ":", "# Create Mafft app.", "app", "=", "Mafft", "(", "InputHandler", "=", "'_input_as_paths'", ",", "params", "=", "params", ",", "SuppressStderr", ...
Returns an Alignment object from two existing Alignments. Parameters ---------- aln1_fp : string file path of 1st alignment aln2_fp : string file path of 2nd alignment params : dict of parameters to pass in to the Mafft app controller. Returns ------- The aligned sequences.
[ "Returns", "an", "Alignment", "object", "from", "two", "existing", "Alignments", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/mafft_v7.py#L332-L357
biocore/burrito-fillings
bfillings/mafft_v7.py
Mafft._input_as_seqs
def _input_as_seqs(self, data): """Format a list of seq as input. Parameters ---------- data: list of strings Each string is a sequence to be aligned. Returns ------- A temp file name that contains the sequences. See Also -------- burrito.util.CommandLineApplication """ lines = [] for i, s in enumerate(data): # will number the sequences 1,2,3,etc. lines.append(''.join(['>', str(i+1)])) lines.append(s) return self._input_as_lines(lines)
python
def _input_as_seqs(self, data): """Format a list of seq as input. Parameters ---------- data: list of strings Each string is a sequence to be aligned. Returns ------- A temp file name that contains the sequences. See Also -------- burrito.util.CommandLineApplication """ lines = [] for i, s in enumerate(data): # will number the sequences 1,2,3,etc. lines.append(''.join(['>', str(i+1)])) lines.append(s) return self._input_as_lines(lines)
[ "def", "_input_as_seqs", "(", "self", ",", "data", ")", ":", "lines", "=", "[", "]", "for", "i", ",", "s", "in", "enumerate", "(", "data", ")", ":", "# will number the sequences 1,2,3,etc.", "lines", ".", "append", "(", "''", ".", "join", "(", "[", "'>...
Format a list of seq as input. Parameters ---------- data: list of strings Each string is a sequence to be aligned. Returns ------- A temp file name that contains the sequences. See Also -------- burrito.util.CommandLineApplication
[ "Format", "a", "list", "of", "seq", "as", "input", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/mafft_v7.py#L204-L225
halfak/deltas
deltas/segmenters/segmenter.py
Segmenter.from_config
def from_config(cls, config, name, section_key="segmenters"): """ Constructs a segmenter from a configuration doc. """ section = config[section_key][name] segmenter_class_path = section['class'] Segmenter = yamlconf.import_module(segmenter_class_path) return Segmenter.from_config(config, name, section_key=section_key)
python
def from_config(cls, config, name, section_key="segmenters"): """ Constructs a segmenter from a configuration doc. """ section = config[section_key][name] segmenter_class_path = section['class'] Segmenter = yamlconf.import_module(segmenter_class_path) return Segmenter.from_config(config, name, section_key=section_key)
[ "def", "from_config", "(", "cls", ",", "config", ",", "name", ",", "section_key", "=", "\"segmenters\"", ")", ":", "section", "=", "config", "[", "section_key", "]", "[", "name", "]", "segmenter_class_path", "=", "section", "[", "'class'", "]", "Segmenter", ...
Constructs a segmenter from a configuration doc.
[ "Constructs", "a", "segmenter", "from", "a", "configuration", "doc", "." ]
train
https://github.com/halfak/deltas/blob/4173f4215b93426a877f4bb4a7a3547834e60ac3/deltas/segmenters/segmenter.py#L19-L26
dailymuse/oz
oz/redis_sessions/__init__.py
random_hex
def random_hex(length): """Generates a random hex string""" return escape.to_unicode(binascii.hexlify(os.urandom(length))[length:])
python
def random_hex(length): """Generates a random hex string""" return escape.to_unicode(binascii.hexlify(os.urandom(length))[length:])
[ "def", "random_hex", "(", "length", ")", ":", "return", "escape", ".", "to_unicode", "(", "binascii", ".", "hexlify", "(", "os", ".", "urandom", "(", "length", ")", ")", "[", "length", ":", "]", ")" ]
Generates a random hex string
[ "Generates", "a", "random", "hex", "string" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/redis_sessions/__init__.py#L13-L15
dailymuse/oz
oz/redis_sessions/__init__.py
password_hash
def password_hash(password, password_salt=None): """Hashes a specified password""" password_salt = password_salt or oz.settings["session_salt"] salted_password = password_salt + password return "sha256!%s" % hashlib.sha256(salted_password.encode("utf-8")).hexdigest()
python
def password_hash(password, password_salt=None): """Hashes a specified password""" password_salt = password_salt or oz.settings["session_salt"] salted_password = password_salt + password return "sha256!%s" % hashlib.sha256(salted_password.encode("utf-8")).hexdigest()
[ "def", "password_hash", "(", "password", ",", "password_salt", "=", "None", ")", ":", "password_salt", "=", "password_salt", "or", "oz", ".", "settings", "[", "\"session_salt\"", "]", "salted_password", "=", "password_salt", "+", "password", "return", "\"sha256!%s...
Hashes a specified password
[ "Hashes", "a", "specified", "password" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/redis_sessions/__init__.py#L17-L21
scott-maddox/openbandparams
src/openbandparams/algorithms.py
bisect
def bisect(func, a, b, xtol=1e-12, maxiter=100): """ Finds the root of `func` using the bisection method. Requirements ------------ - func must be continuous function that accepts a single number input and returns a single number - `func(a)` and `func(b)` must have opposite sign Parameters ---------- func : function the function that we want to find the root of a : number one of the bounds on the input b : number the other bound on the input xtol : number, optional the solution tolerance of the input value. The algorithm is considered converged if `abs(b-a)2. < xtol` maxiter : number, optional the maximum number of iterations allowed for convergence """ fa = func(a) if fa == 0.: return a fb = func(b) if fb == 0.: return b assert sign(fa) != sign(fb) for i in xrange(maxiter): c = (a + b) / 2. fc = func(c) if fc == 0. or abs(b - a) / 2. < xtol: return c if sign(fc) == sign(func(a)): a = c else: b = c else: raise RuntimeError('Failed to converge after %d iterations.' % maxiter)
python
def bisect(func, a, b, xtol=1e-12, maxiter=100): """ Finds the root of `func` using the bisection method. Requirements ------------ - func must be continuous function that accepts a single number input and returns a single number - `func(a)` and `func(b)` must have opposite sign Parameters ---------- func : function the function that we want to find the root of a : number one of the bounds on the input b : number the other bound on the input xtol : number, optional the solution tolerance of the input value. The algorithm is considered converged if `abs(b-a)2. < xtol` maxiter : number, optional the maximum number of iterations allowed for convergence """ fa = func(a) if fa == 0.: return a fb = func(b) if fb == 0.: return b assert sign(fa) != sign(fb) for i in xrange(maxiter): c = (a + b) / 2. fc = func(c) if fc == 0. or abs(b - a) / 2. < xtol: return c if sign(fc) == sign(func(a)): a = c else: b = c else: raise RuntimeError('Failed to converge after %d iterations.' % maxiter)
[ "def", "bisect", "(", "func", ",", "a", ",", "b", ",", "xtol", "=", "1e-12", ",", "maxiter", "=", "100", ")", ":", "fa", "=", "func", "(", "a", ")", "if", "fa", "==", "0.", ":", "return", "a", "fb", "=", "func", "(", "b", ")", "if", "fb", ...
Finds the root of `func` using the bisection method. Requirements ------------ - func must be continuous function that accepts a single number input and returns a single number - `func(a)` and `func(b)` must have opposite sign Parameters ---------- func : function the function that we want to find the root of a : number one of the bounds on the input b : number the other bound on the input xtol : number, optional the solution tolerance of the input value. The algorithm is considered converged if `abs(b-a)2. < xtol` maxiter : number, optional the maximum number of iterations allowed for convergence
[ "Finds", "the", "root", "of", "func", "using", "the", "bisection", "method", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/algorithms.py#L29-L73
michaelpb/omnic
omnic/types/resourceurl.py
ResourceURL.parse_string
def parse_string(s): ''' Parses a foreign resource URL into the URL string itself and any relevant args and kwargs ''' matched_obj = SPLIT_URL_RE.match(s) if not matched_obj: raise URLParseException('Invalid Resource URL: "%s"' % s) url_string, arguments_string = matched_obj.groups() args_as_strings = URL_ARGUMENTS_RE.findall(arguments_string) # Determine args and kwargs args = [] kwargs = {} for arg_string in args_as_strings: kwarg_match = ARG_RE.match(arg_string) if kwarg_match: key, value = kwarg_match.groups() kwargs[key.strip()] = value.strip() else: args.append(arg_string.strip()) # Default to HTTP if url_string has no URL if not SCHEME_RE.match(url_string): url_string = '%s://%s' % (DEFAULT_SCHEME, url_string) return url_string.strip(), args, kwargs
python
def parse_string(s): ''' Parses a foreign resource URL into the URL string itself and any relevant args and kwargs ''' matched_obj = SPLIT_URL_RE.match(s) if not matched_obj: raise URLParseException('Invalid Resource URL: "%s"' % s) url_string, arguments_string = matched_obj.groups() args_as_strings = URL_ARGUMENTS_RE.findall(arguments_string) # Determine args and kwargs args = [] kwargs = {} for arg_string in args_as_strings: kwarg_match = ARG_RE.match(arg_string) if kwarg_match: key, value = kwarg_match.groups() kwargs[key.strip()] = value.strip() else: args.append(arg_string.strip()) # Default to HTTP if url_string has no URL if not SCHEME_RE.match(url_string): url_string = '%s://%s' % (DEFAULT_SCHEME, url_string) return url_string.strip(), args, kwargs
[ "def", "parse_string", "(", "s", ")", ":", "matched_obj", "=", "SPLIT_URL_RE", ".", "match", "(", "s", ")", "if", "not", "matched_obj", ":", "raise", "URLParseException", "(", "'Invalid Resource URL: \"%s\"'", "%", "s", ")", "url_string", ",", "arguments_string"...
Parses a foreign resource URL into the URL string itself and any relevant args and kwargs
[ "Parses", "a", "foreign", "resource", "URL", "into", "the", "URL", "string", "itself", "and", "any", "relevant", "args", "and", "kwargs" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/types/resourceurl.py#L55-L82
dailymuse/oz
oz/bandit/actions.py
add_experiment
def add_experiment(experiment): """Adds a new experiment""" redis = oz.redis.create_connection() oz.bandit.add_experiment(redis, experiment)
python
def add_experiment(experiment): """Adds a new experiment""" redis = oz.redis.create_connection() oz.bandit.add_experiment(redis, experiment)
[ "def", "add_experiment", "(", "experiment", ")", ":", "redis", "=", "oz", ".", "redis", ".", "create_connection", "(", ")", "oz", ".", "bandit", ".", "add_experiment", "(", "redis", ",", "experiment", ")" ]
Adds a new experiment
[ "Adds", "a", "new", "experiment" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/bandit/actions.py#L10-L13
dailymuse/oz
oz/bandit/actions.py
archive_experiment
def archive_experiment(experiment): """Archives an experiment""" redis = oz.redis.create_connection() oz.bandit.Experiment(redis, experiment).archive()
python
def archive_experiment(experiment): """Archives an experiment""" redis = oz.redis.create_connection() oz.bandit.Experiment(redis, experiment).archive()
[ "def", "archive_experiment", "(", "experiment", ")", ":", "redis", "=", "oz", ".", "redis", ".", "create_connection", "(", ")", "oz", ".", "bandit", ".", "Experiment", "(", "redis", ",", "experiment", ")", ".", "archive", "(", ")" ]
Archives an experiment
[ "Archives", "an", "experiment" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/bandit/actions.py#L16-L19
dailymuse/oz
oz/bandit/actions.py
add_experiment_choice
def add_experiment_choice(experiment, choice): """Adds an experiment choice""" redis = oz.redis.create_connection() oz.bandit.Experiment(redis, experiment).add_choice(choice)
python
def add_experiment_choice(experiment, choice): """Adds an experiment choice""" redis = oz.redis.create_connection() oz.bandit.Experiment(redis, experiment).add_choice(choice)
[ "def", "add_experiment_choice", "(", "experiment", ",", "choice", ")", ":", "redis", "=", "oz", ".", "redis", ".", "create_connection", "(", ")", "oz", ".", "bandit", ".", "Experiment", "(", "redis", ",", "experiment", ")", ".", "add_choice", "(", "choice"...
Adds an experiment choice
[ "Adds", "an", "experiment", "choice" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/bandit/actions.py#L22-L25
dailymuse/oz
oz/bandit/actions.py
remove_experiment_choice
def remove_experiment_choice(experiment, choice): """Removes an experiment choice""" redis = oz.redis.create_connection() oz.bandit.Experiment(redis, experiment).remove_choice(choice)
python
def remove_experiment_choice(experiment, choice): """Removes an experiment choice""" redis = oz.redis.create_connection() oz.bandit.Experiment(redis, experiment).remove_choice(choice)
[ "def", "remove_experiment_choice", "(", "experiment", ",", "choice", ")", ":", "redis", "=", "oz", ".", "redis", ".", "create_connection", "(", ")", "oz", ".", "bandit", ".", "Experiment", "(", "redis", ",", "experiment", ")", ".", "remove_choice", "(", "c...
Removes an experiment choice
[ "Removes", "an", "experiment", "choice" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/bandit/actions.py#L28-L31
dailymuse/oz
oz/bandit/actions.py
get_experiment_results
def get_experiment_results(): """ Computes the results of all experiments, stores it in redis, and prints it out """ redis = oz.redis.create_connection() for experiment in oz.bandit.get_experiments(redis): experiment.compute_default_choice() csq, confident = experiment.confidence() print("%s:" % experiment.name) print("- creation date: %s" % experiment.metadata["creation_date"]) print("- default choice: %s" % experiment.default_choice) print("- chi squared: %s" % csq) print("- confident: %s" % confident) print("- choices:") for choice in experiment.choices: print(" - %s: plays=%s, rewards=%s, performance=%s" % (choice.name, choice.plays, choice.rewards, choice.performance))
python
def get_experiment_results(): """ Computes the results of all experiments, stores it in redis, and prints it out """ redis = oz.redis.create_connection() for experiment in oz.bandit.get_experiments(redis): experiment.compute_default_choice() csq, confident = experiment.confidence() print("%s:" % experiment.name) print("- creation date: %s" % experiment.metadata["creation_date"]) print("- default choice: %s" % experiment.default_choice) print("- chi squared: %s" % csq) print("- confident: %s" % confident) print("- choices:") for choice in experiment.choices: print(" - %s: plays=%s, rewards=%s, performance=%s" % (choice.name, choice.plays, choice.rewards, choice.performance))
[ "def", "get_experiment_results", "(", ")", ":", "redis", "=", "oz", ".", "redis", ".", "create_connection", "(", ")", "for", "experiment", "in", "oz", ".", "bandit", ".", "get_experiments", "(", "redis", ")", ":", "experiment", ".", "compute_default_choice", ...
Computes the results of all experiments, stores it in redis, and prints it out
[ "Computes", "the", "results", "of", "all", "experiments", "stores", "it", "in", "redis", "and", "prints", "it", "out" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/bandit/actions.py#L34-L54
dailymuse/oz
oz/bandit/actions.py
sync_experiments_from_spec
def sync_experiments_from_spec(filename): """ Takes the path to a JSON file declaring experiment specifications, and modifies the experiments stored in redis to match the spec. A spec looks like this: { "experiment 1": ["choice 1", "choice 2", "choice 3"], "experiment 2": ["choice 1", "choice 2"] } """ redis = oz.redis.create_connection() with open(filename, "r") as f: schema = escape.json_decode(f.read()) oz.bandit.sync_from_spec(redis, schema)
python
def sync_experiments_from_spec(filename): """ Takes the path to a JSON file declaring experiment specifications, and modifies the experiments stored in redis to match the spec. A spec looks like this: { "experiment 1": ["choice 1", "choice 2", "choice 3"], "experiment 2": ["choice 1", "choice 2"] } """ redis = oz.redis.create_connection() with open(filename, "r") as f: schema = escape.json_decode(f.read()) oz.bandit.sync_from_spec(redis, schema)
[ "def", "sync_experiments_from_spec", "(", "filename", ")", ":", "redis", "=", "oz", ".", "redis", ".", "create_connection", "(", ")", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "f", ":", "schema", "=", "escape", ".", "json_decode", "(", "f...
Takes the path to a JSON file declaring experiment specifications, and modifies the experiments stored in redis to match the spec. A spec looks like this: { "experiment 1": ["choice 1", "choice 2", "choice 3"], "experiment 2": ["choice 1", "choice 2"] }
[ "Takes", "the", "path", "to", "a", "JSON", "file", "declaring", "experiment", "specifications", "and", "modifies", "the", "experiments", "stored", "in", "redis", "to", "match", "the", "spec", "." ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/bandit/actions.py#L57-L74