repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
calmjs/calmjs
src/calmjs/ui.py
prompt_overwrite_json
def prompt_overwrite_json(original, new, target_path, dumps=json_dumps): """ Prompt end user with a diff of original and new json that may overwrite the file at the target_path. This function only displays a confirmation prompt and it is up to the caller to implement the actual functionality. Optionally, a custom json.dumps method can also be passed in for output generation. """ # generate compacted ndiff output. diff = '\n'.join(l for l in ( line.rstrip() for line in difflib.ndiff( json_dumps(original).splitlines(), json_dumps(new).splitlines(), )) if l[:1] in '?+-' or l[-1:] in '{}' or l[-2:] == '},') basename_target = basename(target_path) return prompt( "Generated '%(basename_target)s' differs with '%(target_path)s'.\n\n" "The following is a compacted list of changes required:\n" "%(diff)s\n\n" "Overwrite '%(target_path)s'?" % locals(), choices=( ('Yes', True), ('No', False), ), default_key=1, )
python
def prompt_overwrite_json(original, new, target_path, dumps=json_dumps):
    """
    Prompt end user with a diff of original and new json that may
    overwrite the file at the target_path.  This function only displays
    a confirmation prompt and it is up to the caller to implement the
    actual functionality.  Optionally, a custom json.dumps method can
    also be passed in for output generation.

    :param original: the currently persisted object
    :param new: the freshly generated object
    :param target_path: path of the file that would be overwritten
    :param dumps: serializer used to render both objects for the diff
    :return: the value bound to the user's choice (True for Yes,
        False for No; No is the default)
    """
    # generate compacted ndiff output: keep the changed lines ('?', '+',
    # '-' prefixes) plus brace-terminated lines for structural context.
    # bug fix: serialize through the ``dumps`` argument -- previously the
    # module-level json_dumps was called directly, silently ignoring any
    # custom serializer passed by the caller.
    diff = '\n'.join(l for l in (
        line.rstrip() for line in difflib.ndiff(
            dumps(original).splitlines(),
            dumps(new).splitlines(),
        )) if l[:1] in '?+-' or l[-1:] in '{}' or l[-2:] == '},')
    basename_target = basename(target_path)
    return prompt(
        "Generated '%(basename_target)s' differs with '%(target_path)s'.\n\n"
        "The following is a compacted list of changes required:\n"
        "%(diff)s\n\n"
        "Overwrite '%(target_path)s'?" % locals(),
        choices=(
            ('Yes', True),
            ('No', False),
        ),
        default_key=1,
    )
[ "def", "prompt_overwrite_json", "(", "original", ",", "new", ",", "target_path", ",", "dumps", "=", "json_dumps", ")", ":", "# generate compacted ndiff output.", "diff", "=", "'\\n'", ".", "join", "(", "l", "for", "l", "in", "(", "line", ".", "rstrip", "(", ...
Prompt end user with a diff of original and new json that may overwrite the file at the target_path. This function only displays a confirmation prompt and it is up to the caller to implement the actual functionality. Optionally, a custom json.dumps method can also be passed in for output generation.
[ "Prompt", "end", "user", "with", "a", "diff", "of", "original", "and", "new", "json", "that", "may", "overwrite", "the", "file", "at", "the", "target_path", ".", "This", "function", "only", "displays", "a", "confirmation", "prompt", "and", "it", "is", "up"...
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/ui.py#L193-L220
train
45,700
calmjs/calmjs
src/calmjs/npm.py
locate_package_entry_file
def locate_package_entry_file(working_dir, package_name): """ Locate a single npm package to return its browser or main entry. """ basedir = join(working_dir, 'node_modules', package_name) package_json = join(basedir, 'package.json') if not exists(package_json): logger.debug( "could not locate package.json for the npm package '%s' in the " "current working directory '%s'; the package may have been " "not installed, the build process may fail", package_name, working_dir, ) return with open(package_json) as fd: package_info = json.load(fd) if ('browser' in package_info or 'main' in package_info): # assume the target file exists because configuration files # never lie /s return join( basedir, *(package_info.get('browser') or package_info['main']).split('/') ) index_js = join(basedir, 'index.js') if exists(index_js): return index_js logger.debug( "package.json for the npm package '%s' does not contain a main " "entry point", package_name, )
python
def locate_package_entry_file(working_dir, package_name):
    """
    Locate a single npm package to return its browser or main entry.
    """
    # root of the installed package underneath node_modules
    pkg_root = join(working_dir, 'node_modules', package_name)
    pkg_json_path = join(pkg_root, 'package.json')

    if not exists(pkg_json_path):
        logger.debug(
            "could not locate package.json for the npm package '%s' in the "
            "current working directory '%s'; the package may have been "
            "not installed, the build process may fail",
            package_name, working_dir,
        )
        return

    with open(pkg_json_path) as stream:
        metadata = json.load(stream)

    if 'browser' in metadata or 'main' in metadata:
        # assume the target file exists because configuration files
        # never lie /s
        entry = metadata.get('browser') or metadata['main']
        return join(pkg_root, *entry.split('/'))

    # no declared entry; fall back to the conventional index.js
    fallback = join(pkg_root, 'index.js')
    if exists(fallback):
        return fallback

    logger.debug(
        "package.json for the npm package '%s' does not contain a main "
        "entry point", package_name,
    )
[ "def", "locate_package_entry_file", "(", "working_dir", ",", "package_name", ")", ":", "basedir", "=", "join", "(", "working_dir", ",", "'node_modules'", ",", "package_name", ")", "package_json", "=", "join", "(", "basedir", ",", "'package.json'", ")", "if", "no...
Locate a single npm package to return its browser or main entry.
[ "Locate", "a", "single", "npm", "package", "to", "return", "its", "browser", "or", "main", "entry", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/npm.py#L52-L86
train
45,701
bitlabstudio/cmsplugin-image-gallery
image_gallery/templatetags/image_gallery_tags.py
render_pictures
def render_pictures(context, selection='recent', amount=3): """Template tag to render a list of pictures.""" pictures = Image.objects.filter( folder__id__in=Gallery.objects.filter(is_published=True).values_list( 'folder__pk', flat=True)) if selection == 'recent': context.update({ 'pictures': pictures.order_by('-uploaded_at')[:amount] }) elif selection == 'random': context.update({ 'pictures': pictures.order_by('?')[:amount] }) else: return None return context
python
def render_pictures(context, selection='recent', amount=3):
    """Template tag to render a list of pictures."""
    # only pictures living in folders attached to published galleries
    published_folders = Gallery.objects.filter(
        is_published=True).values_list('folder__pk', flat=True)
    candidates = Image.objects.filter(folder__id__in=published_folders)
    if selection == 'recent':
        ordering = '-uploaded_at'
    elif selection == 'random':
        ordering = '?'
    else:
        # unknown selection mode
        return None
    context.update({'pictures': candidates.order_by(ordering)[:amount]})
    return context
[ "def", "render_pictures", "(", "context", ",", "selection", "=", "'recent'", ",", "amount", "=", "3", ")", ":", "pictures", "=", "Image", ".", "objects", ".", "filter", "(", "folder__id__in", "=", "Gallery", ".", "objects", ".", "filter", "(", "is_publishe...
Template tag to render a list of pictures.
[ "Template", "tag", "to", "render", "a", "list", "of", "pictures", "." ]
f16a2d5d0a6fde469bc07436ff0cd84af2c78e5c
https://github.com/bitlabstudio/cmsplugin-image-gallery/blob/f16a2d5d0a6fde469bc07436ff0cd84af2c78e5c/image_gallery/templatetags/image_gallery_tags.py#L12-L27
train
45,702
pandeylab/pythomics
pythomics/genomics/structures.py
VCFFile.add_header
def add_header(self, entry): """Parses the VCF Header field and returns the number of samples in the VCF file""" info = entry.split('\t') self.n_individuals = len(info)-9 for i,v in enumerate(info[9:]): self.individuals[v] = i return self.n_individuals > 0
python
def add_header(self, entry):
    """Parses the VCF Header field and returns the number of samples in the VCF file"""
    columns = entry.split('\t')
    # the first nine columns are the fixed VCF fields; everything after
    # them is a sample name
    self.n_individuals = len(columns) - 9
    for index, sample_name in enumerate(columns[9:]):
        self.individuals[sample_name] = index
    return self.n_individuals > 0
[ "def", "add_header", "(", "self", ",", "entry", ")", ":", "info", "=", "entry", ".", "split", "(", "'\\t'", ")", "self", ".", "n_individuals", "=", "len", "(", "info", ")", "-", "9", "for", "i", ",", "v", "in", "enumerate", "(", "info", "[", "9",...
Parses the VCF Header field and returns the number of samples in the VCF file
[ "Parses", "the", "VCF", "Header", "field", "and", "returns", "the", "number", "of", "samples", "in", "the", "VCF", "file" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L47-L53
train
45,703
pandeylab/pythomics
pythomics/genomics/structures.py
VCFFile.add_entry
def add_entry(self, row): """This will parse the VCF entry and also store it within the VCFFile. It will also return the VCFEntry as well. """ var_call = VCFEntry(self.individuals) var_call.parse_entry( row ) self.entries[(var_call.chrom, var_call.pos)] = var_call return var_call
python
def add_entry(self, row):
    """Parse a raw VCF row, store the resulting VCFEntry in this
    VCFFile keyed by (chromosome, position), and return it.
    """
    entry = VCFEntry(self.individuals)
    entry.parse_entry(row)
    # keyed by locus for quick lookup later
    self.entries[(entry.chrom, entry.pos)] = entry
    return entry
[ "def", "add_entry", "(", "self", ",", "row", ")", ":", "var_call", "=", "VCFEntry", "(", "self", ".", "individuals", ")", "var_call", ".", "parse_entry", "(", "row", ")", "self", ".", "entries", "[", "(", "var_call", ".", "chrom", ",", "var_call", ".",...
This will parse the VCF entry and also store it within the VCFFile. It will also return the VCFEntry as well.
[ "This", "will", "parse", "the", "VCF", "entry", "and", "also", "store", "it", "within", "the", "VCFFile", ".", "It", "will", "also", "return", "the", "VCFEntry", "as", "well", "." ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L72-L80
train
45,704
pandeylab/pythomics
pythomics/genomics/structures.py
VCFFile.get_header
def get_header(self, individual=-1): """Returns the vcf header """ type_map = dict([(val,key) for key,val in self.meta.type_map.iteritems()]) extra = '\n'.join(['##{0}'.format(i) for i in self.meta.extra]) info = '\n'.join(['##INFO=<ID={0},Number={1},Type={2},Description={3}>'.format(key, val.get('num_entries','.'), type_map.get(val.get('type', '')), val.get('description')) for key,val in self.meta.info.iteritems()]) filter = '\n'.join(['##FILTER=<ID={0},Description={1}>'.format(key, val.get('description','.')) for key,val in self.meta.filter.iteritems()]) format = '\n'.join(['##FORMAT=<ID={0},Number={1},Type={2},Description={3}>'.format(key, val.get('num_entries','.'), type_map.get(val.get('type', '')), val.get('description')) for key,val in self.meta.format.iteritems()]) alt = '\n'.join(['##ALT=<ID={0},Description={1}>'.format(key, val.get('description','.')) for key,val in self.meta.alt.iteritems()]) header = '\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT']) if individual is not None: if individual == -1: individual = '\t'.join(self.individuals.keys()) else: if isinstance(individual, int): for i, v in self.individuals.iteritems(): if v == individual: individual = i break header += '\t'+individual return '\n'.join([extra, info, filter, format, alt, header])
python
def get_header(self, individual=-1):
    """Returns the vcf header

    Rebuilds the meta-information ('##...') lines from self.meta and
    appends the '#CHROM ...' column header line.

    :param individual: -1 (default) appends every known sample column;
        a sample name (str) or sample index (int) appends only that
        column; None omits the sample columns entirely.
    """
    # invert the meta type map so internal type codes render back to the
    # VCF type names (NOTE: .iteritems() is Python 2 only)
    type_map = dict([(val,key) for key,val in self.meta.type_map.iteritems()])
    # free-form meta lines recorded verbatim
    extra = '\n'.join(['##{0}'.format(i) for i in self.meta.extra])
    # NOTE(review): descriptions presumably still carry their surrounding
    # double quotes from parsing -- confirm, since the templates below do
    # not add quotes themselves as the VCF spec requires
    info = '\n'.join(['##INFO=<ID={0},Number={1},Type={2},Description={3}>'.format(key, val.get('num_entries','.'), type_map.get(val.get('type', '')), val.get('description')) for key,val in self.meta.info.iteritems()])
    # 'filter' and 'format' shadow builtins; kept as-is for byte-identity
    filter = '\n'.join(['##FILTER=<ID={0},Description={1}>'.format(key, val.get('description','.')) for key,val in self.meta.filter.iteritems()])
    format = '\n'.join(['##FORMAT=<ID={0},Number={1},Type={2},Description={3}>'.format(key, val.get('num_entries','.'), type_map.get(val.get('type', '')), val.get('description')) for key,val in self.meta.format.iteritems()])
    alt = '\n'.join(['##ALT=<ID={0},Description={1}>'.format(key, val.get('description','.')) for key,val in self.meta.alt.iteritems()])
    header = '\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT'])
    if individual is not None:
        if individual == -1:
            # all samples, tab separated
            individual = '\t'.join(self.individuals.keys())
        else:
            if isinstance(individual, int):
                # translate a sample index back to its name
                for i, v in self.individuals.iteritems():
                    if v == individual:
                        individual = i
                        break
        header += '\t'+individual
    return '\n'.join([extra, info, filter, format, alt, header])
[ "def", "get_header", "(", "self", ",", "individual", "=", "-", "1", ")", ":", "type_map", "=", "dict", "(", "[", "(", "val", ",", "key", ")", "for", "key", ",", "val", "in", "self", ".", "meta", ".", "type_map", ".", "iteritems", "(", ")", "]", ...
Returns the vcf header
[ "Returns", "the", "vcf", "header" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L82-L103
train
45,705
pandeylab/pythomics
pythomics/genomics/structures.py
VCFMeta.add_info
def add_info(self, entry): """Parse and store the info field""" entry = entry[8:-1] info = entry.split(',') if len(info) < 4: return False for v in info: key, value = v.split('=', 1) if key == 'ID': self.info[value] = {} id_ = value elif key == 'Number': if value == 'A' or value == 'G': value = -1 self.info[id_]['num_entries'] = value elif key == 'Type': self.info[id_]['type'] = self.type_map[value] elif key == 'Description': self.info[id_]['description'] = value if len(info) > 4: self.info[id_]['description'] += '; '.join(info[4:]) break return True
python
def add_info(self, entry):
    """Parse and store the info field

    :param entry: a raw '##INFO=<...>' meta line
    :return: True when the line carried at least the four expected
        fields (ID, Number, Type, Description), False otherwise
    """
    # strip the leading '##INFO=<' and the trailing '>'
    entry = entry[8:-1]
    info = entry.split(',')
    if len(info) < 4:
        return False
    for v in info:
        key, value = v.split('=', 1)
        if key == 'ID':
            self.info[value] = {}
            # remember the current ID for the following key=value pairs
            # NOTE(review): assumes ID is the first field; otherwise id_
            # below would be referenced before assignment -- confirm
            id_ = value
        elif key == 'Number':
            # 'A' (per alt allele) and 'G' (per genotype) mean a variable
            # count; both are normalized to -1
            if value == 'A' or value == 'G':
                value = -1
            self.info[id_]['num_entries'] = value
        elif key == 'Type':
            self.info[id_]['type'] = self.type_map[value]
        elif key == 'Description':
            self.info[id_]['description'] = value
            if len(info) > 4:
                # the description itself contained commas; re-attach the
                # split-off tail (NOTE: the original commas become '; ')
                self.info[id_]['description'] += '; '.join(info[4:])
            # Description is the last handled field; stop scanning so the
            # re-attached fragments are not parsed as key=value pairs
            break
    return True
[ "def", "add_info", "(", "self", ",", "entry", ")", ":", "entry", "=", "entry", "[", "8", ":", "-", "1", "]", "info", "=", "entry", ".", "split", "(", "','", ")", "if", "len", "(", "info", ")", "<", "4", ":", "return", "False", "for", "v", "in...
Parse and store the info field
[ "Parse", "and", "store", "the", "info", "field" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L133-L155
train
45,706
pandeylab/pythomics
pythomics/genomics/structures.py
VCFMeta.add_filter
def add_filter(self, entry): """Parse and store the filter field""" entry = entry[10:-1] info = entry.split(',') if len(info) < 2: return False for v in info: key, value = v.split('=', 1) if key == 'ID': self.filter[value] = {} id_ = value elif key == 'Description': self.filter[id_]['description'] = value if len(info) > 2: self.info[id_]['description'] += '; '.join(info[2:]) return True
python
def add_filter(self, entry):
    """Parse and store the filter field

    :param entry: a raw '##FILTER=<...>' meta line
    :return: True when the line carried at least the two expected
        fields (ID, Description), False otherwise
    """
    # strip the leading '##FILTER=<' and the trailing '>'
    entry = entry[10:-1]
    info = entry.split(',')
    if len(info) < 2:
        return False
    for v in info:
        key, value = v.split('=', 1)
        if key == 'ID':
            self.filter[value] = {}
            id_ = value
        elif key == 'Description':
            self.filter[id_]['description'] = value
            if len(info) > 2:
                # the description contained commas; re-attach the tail.
                # bug fix: this appended to self.info instead of
                # self.filter, corrupting the INFO metadata.
                self.filter[id_]['description'] += '; '.join(info[2:])
            # bug fix: stop scanning here (as add_info/add_alt do) --
            # the re-attached fragments contain no '=' and would crash
            # the unpacking above with a ValueError
            break
    return True
[ "def", "add_filter", "(", "self", ",", "entry", ")", ":", "entry", "=", "entry", "[", "10", ":", "-", "1", "]", "info", "=", "entry", ".", "split", "(", "','", ")", "if", "len", "(", "info", ")", "<", "2", ":", "return", "False", "for", "v", ...
Parse and store the filter field
[ "Parse", "and", "store", "the", "filter", "field" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L157-L172
train
45,707
pandeylab/pythomics
pythomics/genomics/structures.py
VCFMeta.add_alt
def add_alt(self, entry): """Parse and store the alternative allele field""" entry = entry[7:-1] info = entry.split(',') if len(info) < 2: return False for v in info: key, value = v.split('=', 1) if key == 'ID': self.alt[value] = {} id_ = value elif key == 'Description': self.alt[id_]['description'] = value if len(info) > 4: self.alt[id_]['description'] += '; '.join(info[4:]) break return True
python
def add_alt(self, entry):
    """Parse and store the alternative allele field

    :param entry: a raw '##ALT=<...>' meta line
    :return: True when the line carried at least the two expected
        fields (ID, Description), False otherwise
    """
    # strip the leading '##ALT=<' and the trailing '>'
    entry = entry[7:-1]
    info = entry.split(',')
    if len(info) < 2:
        return False
    for v in info:
        key, value = v.split('=', 1)
        if key == 'ID':
            self.alt[value] = {}
            id_ = value
        elif key == 'Description':
            self.alt[id_]['description'] = value
            # bug fix: ALT lines have two leading fields (ID,
            # Description), not four; the previous `> 4` / info[4:]
            # (copy-pasted from add_info) silently dropped the tail of
            # descriptions containing one or two commas
            if len(info) > 2:
                self.alt[id_]['description'] += '; '.join(info[2:])
            break
    return True
[ "def", "add_alt", "(", "self", ",", "entry", ")", ":", "entry", "=", "entry", "[", "7", ":", "-", "1", "]", "info", "=", "entry", ".", "split", "(", "','", ")", "if", "len", "(", "info", ")", "<", "2", ":", "return", "False", "for", "v", "in"...
Parse and store the alternative allele field
[ "Parse", "and", "store", "the", "alternative", "allele", "field" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L198-L214
train
45,708
pandeylab/pythomics
pythomics/genomics/structures.py
VCFEntry.sample_string
def sample_string(self, individual=-1): """Returns the VCF entry as it appears in the vcf file""" base = str(self) extra = self.get_sample_info(individual=individual) extra = [':'.join([str(j) for j in i]) for i in zip(*extra.values())] return '\t'.join([base, '\t'.join(extra)])
python
def sample_string(self, individual=-1):
    """Returns the VCF entry as it appears in the vcf file"""
    fixed_fields = str(self)
    per_format = self.get_sample_info(individual=individual)
    # transpose the per-format lists into per-sample colon-joined columns
    sample_columns = [
        ':'.join(str(field) for field in sample)
        for sample in zip(*per_format.values())
    ]
    return '\t'.join([fixed_fields, '\t'.join(sample_columns)])
[ "def", "sample_string", "(", "self", ",", "individual", "=", "-", "1", ")", ":", "base", "=", "str", "(", "self", ")", "extra", "=", "self", ".", "get_sample_info", "(", "individual", "=", "individual", ")", "extra", "=", "[", "':'", ".", "join", "("...
Returns the VCF entry as it appears in the vcf file
[ "Returns", "the", "VCF", "entry", "as", "it", "appears", "in", "the", "vcf", "file" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L245-L250
train
45,709
pandeylab/pythomics
pythomics/genomics/structures.py
VCFEntry.get_sample_info
def get_sample_info(self, individual=-1): """Returns the sample info of a given sample or all by default """ if isinstance(individual, str): individual = self.individuals[individual] extra = OrderedDict() for format_ in self.format: index = getattr(self, format_) if index != -1: if format_ == 'GT': d = self.genotype elif format_ == 'GQ': d = self.genome_quality elif format_ == 'DP': d = self.depth if individual == -1: if len(d) != len(self.samples): [self.parse_sample(i) for i in six.moves.range(len(self.samples))] extra[format_] = [d[i] for i in six.moves.range(len(d))] else: if individual not in d: self.parse_sample(individual) extra[format_] = [d[individual]] return extra
python
def get_sample_info(self, individual=-1):
    """Returns the sample info of a given sample or all by default

    :param individual: -1 (default) for every sample, a sample name
        (str), or a sample index (int)
    :return: OrderedDict keyed by format tag ('GT', 'GQ', 'DP'), each
        value a list with one entry per requested sample
    """
    if isinstance(individual, str):
        # translate a sample name to its column index
        individual = self.individuals[individual]
    extra = OrderedDict()
    for format_ in self.format:
        # the per-tag attribute holds the tag's position within the
        # FORMAT column, or -1 when absent from this entry
        # NOTE(review): assumes every tag in self.format has a matching
        # class attribute default; an unexpected tag would raise
        # AttributeError here -- confirm against the class definition
        index = getattr(self, format_)
        if index != -1:
            if format_ == 'GT':
                d = self.genotype
            elif format_ == 'GQ':
                d = self.genome_quality
            elif format_ == 'DP':
                d = self.depth
            if individual == -1:
                # lazily materialize samples: only reparse when some are
                # still missing from the per-tag store
                if len(d) != len(self.samples):
                    [self.parse_sample(i) for i in six.moves.range(len(self.samples))]
                extra[format_] = [d[i] for i in six.moves.range(len(d))]
            else:
                if individual not in d:
                    self.parse_sample(individual)
                extra[format_] = [d[individual]]
    return extra
[ "def", "get_sample_info", "(", "self", ",", "individual", "=", "-", "1", ")", ":", "if", "isinstance", "(", "individual", ",", "str", ")", ":", "individual", "=", "self", ".", "individuals", "[", "individual", "]", "extra", "=", "OrderedDict", "(", ")", ...
Returns the sample info of a given sample or all by default
[ "Returns", "the", "sample", "info", "of", "a", "given", "sample", "or", "all", "by", "default" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L252-L276
train
45,710
pandeylab/pythomics
pythomics/genomics/structures.py
VCFEntry.is_homozygous
def is_homozygous(self, individual=None): """This will give a boolean list corresponding to whether each individual is homozygous for the alternative allele. """ if individual is not None: if isinstance(individual, str): individual = self.individuals[individual] alts = self.genotype[individual] return [sum(alts) == len(alts)] if sum(alts) > 0 else [False] else: return [sum(alts) == len(alts) if sum(alts) > 0 else False for i, alts in self.genotype.iteritems()]
python
def is_homozygous(self, individual=None):
    """This will give a boolean list corresponding to whether each
    individual is homozygous for the alternative allele.
    """
    def _homozygous_alt(calls):
        # homozygous-alt: at least one alt call and every call is alt
        return sum(calls) == len(calls) if sum(calls) > 0 else False
    if individual is None:
        # one flag per sample (NOTE: .iteritems() is Python 2 only)
        return [_homozygous_alt(calls) for i, calls in self.genotype.iteritems()]
    if isinstance(individual, str):
        individual = self.individuals[individual]
    calls = self.genotype[individual]
    return [_homozygous_alt(calls)]
[ "def", "is_homozygous", "(", "self", ",", "individual", "=", "None", ")", ":", "if", "individual", "is", "not", "None", ":", "if", "isinstance", "(", "individual", ",", "str", ")", ":", "individual", "=", "self", ".", "individuals", "[", "individual", "]...
This will give a boolean list corresponding to whether each individual is homozygous for the alternative allele.
[ "This", "will", "give", "a", "boolean", "list", "corresponding", "to", "whether", "each", "individual", "is", "homozygous", "for", "the", "alternative", "allele", "." ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L278-L289
train
45,711
pandeylab/pythomics
pythomics/genomics/structures.py
VCFEntry.get_alt
def get_alt(self, individual=0, nucleotides_only=True): """Returns the alternative alleles of the individual as a list""" #not i.startswith(',') is put in to handle cases like <DEL:ME:ALU> where we have no alternate allele #but some reference if isinstance(individual, str): individual = self.individuals[individual] if nucleotides_only: return [self.alt[i-1].replace('.', '') for i in self.genotype[individual] if i > 0 and not self.alt[i-1].startswith('<')] else: return [self.alt[i-1].replace('.', '') for i in self.genotype[individual] if i > 0]
python
def get_alt(self, individual=0, nucleotides_only=True):
    """Returns the alternative alleles of the individual as a list"""
    # symbolic alleles such as <DEL:ME:ALU> carry no nucleotide
    # sequence, so they are skipped when nucleotides_only is requested
    if isinstance(individual, str):
        individual = self.individuals[individual]
    alleles = []
    for call in self.genotype[individual]:
        if call <= 0:
            # 0 is the reference allele; nothing to report
            continue
        allele = self.alt[call - 1]
        if nucleotides_only and allele.startswith('<'):
            continue
        alleles.append(allele.replace('.', ''))
    return alleles
[ "def", "get_alt", "(", "self", ",", "individual", "=", "0", ",", "nucleotides_only", "=", "True", ")", ":", "#not i.startswith(',') is put in to handle cases like <DEL:ME:ALU> where we have no alternate allele", "#but some reference", "if", "isinstance", "(", "individual", ",...
Returns the alternative alleles of the individual as a list
[ "Returns", "the", "alternative", "alleles", "of", "the", "individual", "as", "a", "list" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L305-L314
train
45,712
pandeylab/pythomics
pythomics/genomics/structures.py
VCFEntry.get_alt_length
def get_alt_length(self, individual=0): """Returns the number of basepairs of each alternative allele""" if isinstance(individual, str): individual = self.individuals[individual] return [len(self.alt[i-1].replace('.','')) for i in self.genotype[individual] if i > 0 and not self.alt[i-1].startswith('<')]
python
def get_alt_length(self, individual=0):
    """Returns the number of basepairs of each alternative allele"""
    if isinstance(individual, str):
        individual = self.individuals[individual]
    lengths = []
    for call in self.genotype[individual]:
        if call > 0:
            allele = self.alt[call - 1]
            # symbolic alleles (e.g. <DEL>) carry no sequence length
            if not allele.startswith('<'):
                lengths.append(len(allele.replace('.', '')))
    return lengths
[ "def", "get_alt_length", "(", "self", ",", "individual", "=", "0", ")", ":", "if", "isinstance", "(", "individual", ",", "str", ")", ":", "individual", "=", "self", ".", "individuals", "[", "individual", "]", "return", "[", "len", "(", "self", ".", "al...
Returns the number of basepairs of each alternative allele
[ "Returns", "the", "number", "of", "basepairs", "of", "each", "alternative", "allele" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L316-L320
train
45,713
pandeylab/pythomics
pythomics/genomics/structures.py
VCFEntry.get_alt_lengths
def get_alt_lengths(self): """Returns the longest length of the variant. For deletions, return is negative, SNPs return 0, and insertions are +. None return corresponds to no variant in interval for specified individual """ #this is a hack to store the # of individuals without having to actually store it out = [] for i in six.moves.range(len(self.genotype)): valid_alt = self.get_alt_length(individual=i) if not valid_alt: out.append(None) else: out.append(max(valid_alt)-len(self.ref)) return out
python
def get_alt_lengths(self):
    """Returns the longest length of the variant. For deletions, return
    is negative, SNPs return 0, and insertions are +. None return
    corresponds to no variant in interval for specified individual
    """
    # len(self.genotype) doubles as the sample count, so the count does
    # not need to be stored separately
    results = []
    for sample_index in six.moves.range(len(self.genotype)):
        alt_lengths = self.get_alt_length(individual=sample_index)
        results.append(max(alt_lengths) - len(self.ref) if alt_lengths else None)
    return results
[ "def", "get_alt_lengths", "(", "self", ")", ":", "#this is a hack to store the # of individuals without having to actually store it", "out", "=", "[", "]", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "self", ".", "genotype", ")", ")", "...
Returns the longest length of the variant. For deletions, return is negative, SNPs return 0, and insertions are +. None return corresponds to no variant in interval for specified individual
[ "Returns", "the", "longest", "length", "of", "the", "variant", ".", "For", "deletions", "return", "is", "negative", "SNPs", "return", "0", "and", "insertions", "are", "+", ".", "None", "return", "corresponds", "to", "no", "variant", "in", "interval", "for", ...
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L322-L336
train
45,714
pandeylab/pythomics
pythomics/genomics/structures.py
VCFEntry.has_snp
def has_snp(self, individual=0): """Returns a boolean list of SNP status, ordered by samples""" if isinstance(individual, str): individual = self.individuals[individual] alts = self.get_alt(individual=individual) if alts: return [i != self.ref and len(i) == len(self.ref) for i in alts] return [False]
python
def has_snp(self, individual=0):
    """Returns a boolean list of SNP status, ordered by samples"""
    if isinstance(individual, str):
        individual = self.individuals[individual]
    variant_alleles = self.get_alt(individual=individual)
    if not variant_alleles:
        # no alternate alleles for this individual
        return [False]
    # a SNP is a same-length substitution of the reference
    return [
        allele != self.ref and len(allele) == len(self.ref)
        for allele in variant_alleles
    ]
[ "def", "has_snp", "(", "self", ",", "individual", "=", "0", ")", ":", "if", "isinstance", "(", "individual", ",", "str", ")", ":", "individual", "=", "self", ".", "individuals", "[", "individual", "]", "alts", "=", "self", ".", "get_alt", "(", "individ...
Returns a boolean list of SNP status, ordered by samples
[ "Returns", "a", "boolean", "list", "of", "SNP", "status", "ordered", "by", "samples" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L338-L345
train
45,715
pandeylab/pythomics
pythomics/genomics/structures.py
VCFEntry.parse_entry
def parse_entry(self, entry): """This parses a VCF row and stores the relevant information""" entry = entry.split('\t') self.chrom, self.pos, self.id, self.ref, alt_, self.qual, filter_, info, self.format = entry[:9] self.samples = entry[9:] self.alt = alt_.split(',') if filter_ == 'PASS' or filter_ == '.': self.passed = True else: self.passed = filter_.split(';') self.info = info # currently unused #if info != '.': #info_l = info.split(';') #self.info = [v.split('=') if '=' in v else (v,1) for v in info_l] self.format = self.format.split(':') if 'GT' in self.format: self.GT = self.format.index('GT') if 'GQ' in self.format: self.GQ = self.format.index('GQ') if 'DP' in self.format: self.DP = self.format.index('DP') if 'FT' in self.format: self.FT = self.format.index('FT')
python
def parse_entry(self, entry):
    """This parses a VCF row and stores the relevant information"""
    columns = entry.split('\t')
    (self.chrom, self.pos, self.id, self.ref, alt_field,
     self.qual, filter_field, info_field, format_field) = columns[:9]
    self.samples = columns[9:]
    self.alt = alt_field.split(',')
    # 'PASS' and '.' (no filter applied) both count as passing;
    # otherwise keep the list of failed filter names
    self.passed = True if filter_field in ('PASS', '.') else filter_field.split(';')
    # currently stored raw and unused; the per-key parse remains disabled
    self.info = info_field
    #if info != '.':
    #info_l = info.split(';')
    #self.info = [v.split('=') if '=' in v else (v,1) for v in info_l]
    self.format = format_field.split(':')
    # record the position of each known tag within the FORMAT column
    for tag in ('GT', 'GQ', 'DP', 'FT'):
        if tag in self.format:
            setattr(self, tag, self.format.index(tag))
[ "def", "parse_entry", "(", "self", ",", "entry", ")", ":", "entry", "=", "entry", ".", "split", "(", "'\\t'", ")", "self", ".", "chrom", ",", "self", ".", "pos", ",", "self", ".", "id", ",", "self", ".", "ref", ",", "alt_", ",", "self", ".", "q...
This parses a VCF row and stores the relevant information
[ "This", "parses", "a", "VCF", "row", "and", "stores", "the", "relevant", "information" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L373-L396
train
45,716
pandeylab/pythomics
pythomics/genomics/structures.py
GFFFeature.add_child
def add_child(self, child): """Children are GFFFeatures and are defined when added. This is done to avoid memory overheads that may be incurred by GFF files that have millions of rows. """ child_id = getattr(child, 'id', None) if child_id: if not hasattr(self, 'children'): self.children = {} if child_id not in self.children: self.children[child_id] = child
python
def add_child(self, child): """Children are GFFFeatures and are defined when added. This is done to avoid memory overheads that may be incurred by GFF files that have millions of rows. """ child_id = getattr(child, 'id', None) if child_id: if not hasattr(self, 'children'): self.children = {} if child_id not in self.children: self.children[child_id] = child
[ "def", "add_child", "(", "self", ",", "child", ")", ":", "child_id", "=", "getattr", "(", "child", ",", "'id'", ",", "None", ")", "if", "child_id", ":", "if", "not", "hasattr", "(", "self", ",", "'children'", ")", ":", "self", ".", "children", "=", ...
Children are GFFFeatures and are defined when added. This is done to avoid memory overheads that may be incurred by GFF files that have millions of rows.
[ "Children", "are", "GFFFeatures", "and", "are", "defined", "when", "added", ".", "This", "is", "done", "to", "avoid", "memory", "overheads", "that", "may", "be", "incurred", "by", "GFF", "files", "that", "have", "millions", "of", "rows", "." ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L495-L505
train
45,717
weso/CWR-DataApi
cwr/parser/encoder/standart/record.py
_iso_handler
def _iso_handler(obj): """ Transforms an object into it's ISO format, if possible. If the object can't be transformed, then an error is raised for the JSON parser. This is meant to be used on datetime instances, but will work with any object having a method called isoformat. :param obj: object to transform into it's ISO format :return: the ISO format of the object """ if hasattr(obj, 'isoformat'): result = obj.isoformat() else: raise TypeError("Unserializable object {} of type {}".format(obj, type(obj))) return result
python
def _iso_handler(obj): """ Transforms an object into it's ISO format, if possible. If the object can't be transformed, then an error is raised for the JSON parser. This is meant to be used on datetime instances, but will work with any object having a method called isoformat. :param obj: object to transform into it's ISO format :return: the ISO format of the object """ if hasattr(obj, 'isoformat'): result = obj.isoformat() else: raise TypeError("Unserializable object {} of type {}".format(obj, type(obj))) return result
[ "def", "_iso_handler", "(", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "'isoformat'", ")", ":", "result", "=", "obj", ".", "isoformat", "(", ")", "else", ":", "raise", "TypeError", "(", "\"Unserializable object {} of type {}\"", ".", "format", "(", ...
Transforms an object into it's ISO format, if possible. If the object can't be transformed, then an error is raised for the JSON parser. This is meant to be used on datetime instances, but will work with any object having a method called isoformat. :param obj: object to transform into it's ISO format :return: the ISO format of the object
[ "Transforms", "an", "object", "into", "it", "s", "ISO", "format", "if", "possible", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/parser/encoder/standart/record.py#L17-L36
train
45,718
calmjs/calmjs
src/calmjs/runtime.py
BootstrapRuntime.argparser
def argparser(self): """ For setting up the argparser for this instance. """ if self.__argparser is None: self.__argparser = self.argparser_factory() self.init_argparser(self.__argparser) return self.__argparser
python
def argparser(self): """ For setting up the argparser for this instance. """ if self.__argparser is None: self.__argparser = self.argparser_factory() self.init_argparser(self.__argparser) return self.__argparser
[ "def", "argparser", "(", "self", ")", ":", "if", "self", ".", "__argparser", "is", "None", ":", "self", ".", "__argparser", "=", "self", ".", "argparser_factory", "(", ")", "self", ".", "init_argparser", "(", "self", ".", "__argparser", ")", "return", "s...
For setting up the argparser for this instance.
[ "For", "setting", "up", "the", "argparser", "for", "this", "instance", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L135-L143
train
45,719
calmjs/calmjs
src/calmjs/runtime.py
BootstrapRuntime.argparser_factory
def argparser_factory(self): """ Produces argparser for this type of Runtime. """ return ArgumentParser( prog=self.prog, description=self.__doc__, add_help=False, )
python
def argparser_factory(self): """ Produces argparser for this type of Runtime. """ return ArgumentParser( prog=self.prog, description=self.__doc__, add_help=False, )
[ "def", "argparser_factory", "(", "self", ")", ":", "return", "ArgumentParser", "(", "prog", "=", "self", ".", "prog", ",", "description", "=", "self", ".", "__doc__", ",", "add_help", "=", "False", ",", ")" ]
Produces argparser for this type of Runtime.
[ "Produces", "argparser", "for", "this", "type", "of", "Runtime", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L145-L152
train
45,720
calmjs/calmjs
src/calmjs/runtime.py
Runtime.unrecognized_arguments_error
def unrecognized_arguments_error(self, args, parsed, extras): """ This exists because argparser is dumb and naive and doesn't fail unrecognized arguments early. """ # loop variants kwargs = vars(parsed) failed = list(extras) # initial values runtime, subparser, idx = (self, self.argparser, 0) # recursion not actually needed when it can be flattened. while isinstance(runtime, Runtime): cmd = kwargs.pop(runtime.action_key) # can happen if it wasn't set, or is set but from a default # value (thus not provided by args) action_idx = None if cmd not in args else args.index(cmd) if cmd not in args and cmd is not None: # this normally shouldn't happen, and the test case # showed that the parsing will not flip down to the # forced default subparser - this can remain a debug # message until otherwise. logger.debug( "command for prog=%r is set to %r without being specified " "as part of the input arguments - the following error " "message may contain misleading references", subparser.prog, cmd ) subargs = args[idx:action_idx] subparsed, subextras = subparser.parse_known_args(subargs) if subextras: subparser.unrecognized_arguments_error(subextras) # since the failed arguments are in order failed = failed[len(subextras):] if not failed: # have taken everything, quit now. # also note that if cmd was really None it would # cause KeyError below, but fortunately it also # forced action_idx to be None which took all # remaining tokens from failed, so definitely get # out of here. break # advance the values # note that any internal consistency will almost certainly # result in KeyError being raised. details = runtime.get_argparser_details(subparser) runtime = details.runtimes[cmd] subparser = details.subparsers[cmd] idx = action_idx + 1 if failed: subparser.unrecognized_arguments_error(failed) sys.exit(2)
python
def unrecognized_arguments_error(self, args, parsed, extras): """ This exists because argparser is dumb and naive and doesn't fail unrecognized arguments early. """ # loop variants kwargs = vars(parsed) failed = list(extras) # initial values runtime, subparser, idx = (self, self.argparser, 0) # recursion not actually needed when it can be flattened. while isinstance(runtime, Runtime): cmd = kwargs.pop(runtime.action_key) # can happen if it wasn't set, or is set but from a default # value (thus not provided by args) action_idx = None if cmd not in args else args.index(cmd) if cmd not in args and cmd is not None: # this normally shouldn't happen, and the test case # showed that the parsing will not flip down to the # forced default subparser - this can remain a debug # message until otherwise. logger.debug( "command for prog=%r is set to %r without being specified " "as part of the input arguments - the following error " "message may contain misleading references", subparser.prog, cmd ) subargs = args[idx:action_idx] subparsed, subextras = subparser.parse_known_args(subargs) if subextras: subparser.unrecognized_arguments_error(subextras) # since the failed arguments are in order failed = failed[len(subextras):] if not failed: # have taken everything, quit now. # also note that if cmd was really None it would # cause KeyError below, but fortunately it also # forced action_idx to be None which took all # remaining tokens from failed, so definitely get # out of here. break # advance the values # note that any internal consistency will almost certainly # result in KeyError being raised. details = runtime.get_argparser_details(subparser) runtime = details.runtimes[cmd] subparser = details.subparsers[cmd] idx = action_idx + 1 if failed: subparser.unrecognized_arguments_error(failed) sys.exit(2)
[ "def", "unrecognized_arguments_error", "(", "self", ",", "args", ",", "parsed", ",", "extras", ")", ":", "# loop variants", "kwargs", "=", "vars", "(", "parsed", ")", "failed", "=", "list", "(", "extras", ")", "# initial values", "runtime", ",", "subparser", ...
This exists because argparser is dumb and naive and doesn't fail unrecognized arguments early.
[ "This", "exists", "because", "argparser", "is", "dumb", "and", "naive", "and", "doesn", "t", "fail", "unrecognized", "arguments", "early", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L619-L672
train
45,721
calmjs/calmjs
src/calmjs/runtime.py
Runtime.error
def error(self, argparser, target, message): """ This was used as part of the original non-recursive lookup for the target parser. """ warnings.warn( 'Runtime.error is deprecated and will be removed by calmjs-4.0.0', DeprecationWarning) details = self.get_argparser_details(argparser) argparser = details.subparsers[target] if details else self.argparser argparser.error(message)
python
def error(self, argparser, target, message): """ This was used as part of the original non-recursive lookup for the target parser. """ warnings.warn( 'Runtime.error is deprecated and will be removed by calmjs-4.0.0', DeprecationWarning) details = self.get_argparser_details(argparser) argparser = details.subparsers[target] if details else self.argparser argparser.error(message)
[ "def", "error", "(", "self", ",", "argparser", ",", "target", ",", "message", ")", ":", "warnings", ".", "warn", "(", "'Runtime.error is deprecated and will be removed by calmjs-4.0.0'", ",", "DeprecationWarning", ")", "details", "=", "self", ".", "get_argparser_detai...
This was used as part of the original non-recursive lookup for the target parser.
[ "This", "was", "used", "as", "part", "of", "the", "original", "non", "-", "recursive", "lookup", "for", "the", "target", "parser", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L674-L685
train
45,722
calmjs/calmjs
src/calmjs/runtime.py
ToolchainRuntime.init_argparser_working_dir
def init_argparser_working_dir( self, argparser, explanation='', help_template=( 'the working directory; %(explanation)s' 'default is current working directory (%(cwd)s)'), ): """ Subclass could an extra expanation on how this is used. Arguments explanation Explanation text for the default help template help_template A standard help message for this option. """ cwd = self.toolchain.join_cwd() argparser.add_argument( '--working-dir', dest=WORKING_DIR, metavar=metavar(WORKING_DIR), default=cwd, help=help_template % {'explanation': explanation, 'cwd': cwd}, )
python
def init_argparser_working_dir( self, argparser, explanation='', help_template=( 'the working directory; %(explanation)s' 'default is current working directory (%(cwd)s)'), ): """ Subclass could an extra expanation on how this is used. Arguments explanation Explanation text for the default help template help_template A standard help message for this option. """ cwd = self.toolchain.join_cwd() argparser.add_argument( '--working-dir', dest=WORKING_DIR, metavar=metavar(WORKING_DIR), default=cwd, help=help_template % {'explanation': explanation, 'cwd': cwd}, )
[ "def", "init_argparser_working_dir", "(", "self", ",", "argparser", ",", "explanation", "=", "''", ",", "help_template", "=", "(", "'the working directory; %(explanation)s'", "'default is current working directory (%(cwd)s)'", ")", ",", ")", ":", "cwd", "=", "self", "."...
Subclass could an extra expanation on how this is used. Arguments explanation Explanation text for the default help template help_template A standard help message for this option.
[ "Subclass", "could", "an", "extra", "expanation", "on", "how", "this", "is", "used", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L792-L816
train
45,723
calmjs/calmjs
src/calmjs/runtime.py
ToolchainRuntime.init_argparser_build_dir
def init_argparser_build_dir( self, argparser, help=( 'the build directory, where all sources will be copied to ' 'as part of the build process; if left unspecified, the ' 'default behavior is to create a new temporary directory ' 'that will be removed upon conclusion of the build; if ' 'specified, it must be an existing directory and all files ' 'for the build will be copied there instead, overwriting any ' 'existing file, with no cleanup done after.' )): """ For setting up build directory """ argparser.add_argument( '--build-dir', default=None, dest=BUILD_DIR, metavar=metavar(BUILD_DIR), help=help, )
python
def init_argparser_build_dir( self, argparser, help=( 'the build directory, where all sources will be copied to ' 'as part of the build process; if left unspecified, the ' 'default behavior is to create a new temporary directory ' 'that will be removed upon conclusion of the build; if ' 'specified, it must be an existing directory and all files ' 'for the build will be copied there instead, overwriting any ' 'existing file, with no cleanup done after.' )): """ For setting up build directory """ argparser.add_argument( '--build-dir', default=None, dest=BUILD_DIR, metavar=metavar(BUILD_DIR), help=help, )
[ "def", "init_argparser_build_dir", "(", "self", ",", "argparser", ",", "help", "=", "(", "'the build directory, where all sources will be copied to '", "'as part of the build process; if left unspecified, the '", "'default behavior is to create a new temporary directory '", "'that will be ...
For setting up build directory
[ "For", "setting", "up", "build", "directory" ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L818-L835
train
45,724
calmjs/calmjs
src/calmjs/runtime.py
ToolchainRuntime.init_argparser_optional_advice
def init_argparser_optional_advice( self, argparser, default=[], help=( 'a comma separated list of packages to retrieve optional ' 'advice from; the provided packages should have registered ' 'the appropriate entry points for setting up the advices for ' 'the toolchain; refer to documentation for the specified ' 'packages for details' )): """ For setting up optional advice. """ argparser.add_argument( '--optional-advice', default=default, required=False, dest=ADVICE_PACKAGES, action=StoreRequirementList, metavar='<advice>[,<advice>[...]]', help=help )
python
def init_argparser_optional_advice( self, argparser, default=[], help=( 'a comma separated list of packages to retrieve optional ' 'advice from; the provided packages should have registered ' 'the appropriate entry points for setting up the advices for ' 'the toolchain; refer to documentation for the specified ' 'packages for details' )): """ For setting up optional advice. """ argparser.add_argument( '--optional-advice', default=default, required=False, dest=ADVICE_PACKAGES, action=StoreRequirementList, metavar='<advice>[,<advice>[...]]', help=help )
[ "def", "init_argparser_optional_advice", "(", "self", ",", "argparser", ",", "default", "=", "[", "]", ",", "help", "=", "(", "'a comma separated list of packages to retrieve optional '", "'advice from; the provided packages should have registered '", "'the appropriate entry points...
For setting up optional advice.
[ "For", "setting", "up", "optional", "advice", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L837-L854
train
45,725
calmjs/calmjs
src/calmjs/runtime.py
ToolchainRuntime.prepare_spec
def prepare_spec(self, spec, **kwargs): """ Prepare a spec for usage with the generic ToolchainRuntime. Subclasses should avoid overriding this; override create_spec instead. """ self.prepare_spec_debug_flag(spec, **kwargs) self.prepare_spec_export_target_checks(spec, **kwargs) # defer the setup till the actual toolchain invocation spec.advise(SETUP, self.prepare_spec_advice_packages, spec, **kwargs)
python
def prepare_spec(self, spec, **kwargs): """ Prepare a spec for usage with the generic ToolchainRuntime. Subclasses should avoid overriding this; override create_spec instead. """ self.prepare_spec_debug_flag(spec, **kwargs) self.prepare_spec_export_target_checks(spec, **kwargs) # defer the setup till the actual toolchain invocation spec.advise(SETUP, self.prepare_spec_advice_packages, spec, **kwargs)
[ "def", "prepare_spec", "(", "self", ",", "spec", ",", "*", "*", "kwargs", ")", ":", "self", ".", "prepare_spec_debug_flag", "(", "spec", ",", "*", "*", "kwargs", ")", "self", ".", "prepare_spec_export_target_checks", "(", "spec", ",", "*", "*", "kwargs", ...
Prepare a spec for usage with the generic ToolchainRuntime. Subclasses should avoid overriding this; override create_spec instead.
[ "Prepare", "a", "spec", "for", "usage", "with", "the", "generic", "ToolchainRuntime", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L926-L937
train
45,726
calmjs/calmjs
src/calmjs/runtime.py
ToolchainRuntime.kwargs_to_spec
def kwargs_to_spec(self, **kwargs): """ Turn the provided kwargs into arguments ready for toolchain. """ spec = self.create_spec(**kwargs) self.prepare_spec(spec, **kwargs) return spec
python
def kwargs_to_spec(self, **kwargs): """ Turn the provided kwargs into arguments ready for toolchain. """ spec = self.create_spec(**kwargs) self.prepare_spec(spec, **kwargs) return spec
[ "def", "kwargs_to_spec", "(", "self", ",", "*", "*", "kwargs", ")", ":", "spec", "=", "self", ".", "create_spec", "(", "*", "*", "kwargs", ")", "self", ".", "prepare_spec", "(", "spec", ",", "*", "*", "kwargs", ")", "return", "spec" ]
Turn the provided kwargs into arguments ready for toolchain.
[ "Turn", "the", "provided", "kwargs", "into", "arguments", "ready", "for", "toolchain", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L947-L954
train
45,727
calmjs/calmjs
src/calmjs/runtime.py
BaseArtifactRegistryRuntime.init_argparser_package_names
def init_argparser_package_names(self, argparser, help=( 'names of the python package to generate artifacts for; ' 'note that the metadata directory for the specified ' 'packages must be writable')): """ Default helper for setting up the package_names option. This is separate so that subclasses are not assumed for the purposes of artifact creation; they should consider modifying the default help message to reflect the fact. """ argparser.add_argument( 'package_names', metavar=metavar('package'), nargs='+', help=help)
python
def init_argparser_package_names(self, argparser, help=( 'names of the python package to generate artifacts for; ' 'note that the metadata directory for the specified ' 'packages must be writable')): """ Default helper for setting up the package_names option. This is separate so that subclasses are not assumed for the purposes of artifact creation; they should consider modifying the default help message to reflect the fact. """ argparser.add_argument( 'package_names', metavar=metavar('package'), nargs='+', help=help)
[ "def", "init_argparser_package_names", "(", "self", ",", "argparser", ",", "help", "=", "(", "'names of the python package to generate artifacts for; '", "'note that the metadata directory for the specified '", "'packages must be writable'", ")", ")", ":", "argparser", ".", "add_...
Default helper for setting up the package_names option. This is separate so that subclasses are not assumed for the purposes of artifact creation; they should consider modifying the default help message to reflect the fact.
[ "Default", "helper", "for", "setting", "up", "the", "package_names", "option", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L995-L1008
train
45,728
calmjs/calmjs
src/calmjs/runtime.py
SourcePackageToolchainRuntime.init_argparser_source_registry
def init_argparser_source_registry( self, argparser, default=None, help=( 'comma separated list of registries to use for gathering ' 'JavaScript sources from the given Python packages' )): """ For setting up the source registry flag. """ argparser.add_argument( '--source-registry', default=default, dest=CALMJS_MODULE_REGISTRY_NAMES, action=StoreDelimitedList, metavar='<registry>[,<registry>[...]]', help=help, ) argparser.add_argument( '--source-registries', default=default, dest=CALMJS_MODULE_REGISTRY_NAMES, action=StoreDelimitedList, help=SUPPRESS, )
python
def init_argparser_source_registry( self, argparser, default=None, help=( 'comma separated list of registries to use for gathering ' 'JavaScript sources from the given Python packages' )): """ For setting up the source registry flag. """ argparser.add_argument( '--source-registry', default=default, dest=CALMJS_MODULE_REGISTRY_NAMES, action=StoreDelimitedList, metavar='<registry>[,<registry>[...]]', help=help, ) argparser.add_argument( '--source-registries', default=default, dest=CALMJS_MODULE_REGISTRY_NAMES, action=StoreDelimitedList, help=SUPPRESS, )
[ "def", "init_argparser_source_registry", "(", "self", ",", "argparser", ",", "default", "=", "None", ",", "help", "=", "(", "'comma separated list of registries to use for gathering '", "'JavaScript sources from the given Python packages'", ")", ")", ":", "argparser", ".", ...
For setting up the source registry flag.
[ "For", "setting", "up", "the", "source", "registry", "flag", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L1027-L1047
train
45,729
calmjs/calmjs
src/calmjs/runtime.py
SourcePackageToolchainRuntime.init_argparser_loaderplugin_registry
def init_argparser_loaderplugin_registry( self, argparser, default=None, help=( 'the name of the registry to use for the handling of loader ' 'plugins that may be loaded from the given Python packages' )): """ Default helper for setting up the loaderplugin registries flags. Note that this is NOT part of the init_argparser due to implementation specific requirements. Subclasses should consider modifying the default value help message to cater to the toolchain it encapsulates. """ argparser.add_argument( '--loaderplugin-registry', default=default, dest=CALMJS_LOADERPLUGIN_REGISTRY_NAME, action='store', metavar=metavar('registry'), help=help, )
python
def init_argparser_loaderplugin_registry( self, argparser, default=None, help=( 'the name of the registry to use for the handling of loader ' 'plugins that may be loaded from the given Python packages' )): """ Default helper for setting up the loaderplugin registries flags. Note that this is NOT part of the init_argparser due to implementation specific requirements. Subclasses should consider modifying the default value help message to cater to the toolchain it encapsulates. """ argparser.add_argument( '--loaderplugin-registry', default=default, dest=CALMJS_LOADERPLUGIN_REGISTRY_NAME, action='store', metavar=metavar('registry'), help=help, )
[ "def", "init_argparser_loaderplugin_registry", "(", "self", ",", "argparser", ",", "default", "=", "None", ",", "help", "=", "(", "'the name of the registry to use for the handling of loader '", "'plugins that may be loaded from the given Python packages'", ")", ")", ":", "argp...
Default helper for setting up the loaderplugin registries flags. Note that this is NOT part of the init_argparser due to implementation specific requirements. Subclasses should consider modifying the default value help message to cater to the toolchain it encapsulates.
[ "Default", "helper", "for", "setting", "up", "the", "loaderplugin", "registries", "flags", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/runtime.py#L1049-L1068
train
45,730
weso/CWR-DataApi
cwr/grammar/field/basic.py
_check_not_empty
def _check_not_empty(string): """ Checks that the string is not empty. If it is empty an exception is raised, stopping the validation. This is used for compulsory alphanumeric fields. :param string: the field value """ string = string.strip() if len(string) == 0: message = 'The string should not be empty' raise pp.ParseException(message)
python
def _check_not_empty(string): """ Checks that the string is not empty. If it is empty an exception is raised, stopping the validation. This is used for compulsory alphanumeric fields. :param string: the field value """ string = string.strip() if len(string) == 0: message = 'The string should not be empty' raise pp.ParseException(message)
[ "def", "_check_not_empty", "(", "string", ")", ":", "string", "=", "string", ".", "strip", "(", ")", "if", "len", "(", "string", ")", "==", "0", ":", "message", "=", "'The string should not be empty'", "raise", "pp", ".", "ParseException", "(", "message", ...
Checks that the string is not empty. If it is empty an exception is raised, stopping the validation. This is used for compulsory alphanumeric fields. :param string: the field value
[ "Checks", "that", "the", "string", "is", "not", "empty", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/basic.py#L108-L122
train
45,731
weso/CWR-DataApi
cwr/grammar/field/basic.py
_to_numeric_float
def _to_numeric_float(number, nums_int): """ Transforms a string into a float. The nums_int parameter indicates the number of characters, starting from the left, to be used for the integer value. All the remaining ones will be used for the decimal value. :param number: string with the number :param nums_int: characters, counting from the left, for the integer value :return: a float created from the string """ index_end = len(number) - nums_int return float(number[:nums_int] + '.' + number[-index_end:])
python
def _to_numeric_float(number, nums_int): """ Transforms a string into a float. The nums_int parameter indicates the number of characters, starting from the left, to be used for the integer value. All the remaining ones will be used for the decimal value. :param number: string with the number :param nums_int: characters, counting from the left, for the integer value :return: a float created from the string """ index_end = len(number) - nums_int return float(number[:nums_int] + '.' + number[-index_end:])
[ "def", "_to_numeric_float", "(", "number", ",", "nums_int", ")", ":", "index_end", "=", "len", "(", "number", ")", "-", "nums_int", "return", "float", "(", "number", "[", ":", "nums_int", "]", "+", "'.'", "+", "number", "[", "-", "index_end", ":", "]",...
Transforms a string into a float. The nums_int parameter indicates the number of characters, starting from the left, to be used for the integer value. All the remaining ones will be used for the decimal value. :param number: string with the number :param nums_int: characters, counting from the left, for the integer value :return: a float created from the string
[ "Transforms", "a", "string", "into", "a", "float", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/basic.py#L245-L258
train
45,732
weso/CWR-DataApi
cwr/grammar/field/basic.py
_check_above_value_float
def _check_above_value_float(string, minimum): """ Checks that the number parsed from the string is above a minimum. This is used on compulsory numeric fields. If the value is not above the minimum an exception is thrown. :param string: the field value :param minimum: minimum value """ value = float(string) if value < minimum: message = 'The Numeric Field value should be above %s' % minimum raise pp.ParseException(message)
python
def _check_above_value_float(string, minimum): """ Checks that the number parsed from the string is above a minimum. This is used on compulsory numeric fields. If the value is not above the minimum an exception is thrown. :param string: the field value :param minimum: minimum value """ value = float(string) if value < minimum: message = 'The Numeric Field value should be above %s' % minimum raise pp.ParseException(message)
[ "def", "_check_above_value_float", "(", "string", ",", "minimum", ")", ":", "value", "=", "float", "(", "string", ")", "if", "value", "<", "minimum", ":", "message", "=", "'The Numeric Field value should be above %s'", "%", "minimum", "raise", "pp", ".", "ParseE...
Checks that the number parsed from the string is above a minimum. This is used on compulsory numeric fields. If the value is not above the minimum an exception is thrown. :param string: the field value :param minimum: minimum value
[ "Checks", "that", "the", "number", "parsed", "from", "the", "string", "is", "above", "a", "minimum", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/basic.py#L261-L276
train
45,733
weso/CWR-DataApi
cwr/grammar/field/basic.py
_to_boolean
def _to_boolean(string): """ Transforms a string into a boolean value. If a value which is not 'Y' or 'N' is received, a ParseException is thrown. :param: string: the string to transform :return: True if the string is 'Y', False if it is 'N' """ if string == 'Y': result = True elif string == 'N': result = False else: raise pp.ParseException(string, msg='Is not a valid boolean value') return result
python
def _to_boolean(string): """ Transforms a string into a boolean value. If a value which is not 'Y' or 'N' is received, a ParseException is thrown. :param: string: the string to transform :return: True if the string is 'Y', False if it is 'N' """ if string == 'Y': result = True elif string == 'N': result = False else: raise pp.ParseException(string, msg='Is not a valid boolean value') return result
[ "def", "_to_boolean", "(", "string", ")", ":", "if", "string", "==", "'Y'", ":", "result", "=", "True", "elif", "string", "==", "'N'", ":", "result", "=", "False", "else", ":", "raise", "pp", ".", "ParseException", "(", "string", ",", "msg", "=", "'I...
Transforms a string into a boolean value. If a value which is not 'Y' or 'N' is received, a ParseException is thrown. :param: string: the string to transform :return: True if the string is 'Y', False if it is 'N'
[ "Transforms", "a", "string", "into", "a", "boolean", "value", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/basic.py#L311-L328
train
45,734
weso/CWR-DataApi
cwr/grammar/field/basic.py
blank
def blank(columns=1, name=None): """ Creates the grammar for a blank field. These are for constant empty strings which should be ignored, as they are used just as fillers. :param columns: number of columns, which is the required number of whitespaces :param name: name for the field :return: grammar for the blank field """ if name is None: name = 'Blank Field' field = pp.Regex('[ ]{' + str(columns) + '}') field.leaveWhitespace() field.suppress() field.setName(name) return field
python
def blank(columns=1, name=None): """ Creates the grammar for a blank field. These are for constant empty strings which should be ignored, as they are used just as fillers. :param columns: number of columns, which is the required number of whitespaces :param name: name for the field :return: grammar for the blank field """ if name is None: name = 'Blank Field' field = pp.Regex('[ ]{' + str(columns) + '}') field.leaveWhitespace() field.suppress() field.setName(name) return field
[ "def", "blank", "(", "columns", "=", "1", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "'Blank Field'", "field", "=", "pp", ".", "Regex", "(", "'[ ]{'", "+", "str", "(", "columns", ")", "+", "'}'", ")", "field...
Creates the grammar for a blank field. These are for constant empty strings which should be ignored, as they are used just as fillers. :param columns: number of columns, which is the required number of whitespaces :param name: name for the field :return: grammar for the blank field
[ "Creates", "the", "grammar", "for", "a", "blank", "field", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/basic.py#L496-L517
train
45,735
pandeylab/pythomics
pythomics/genomics/parsers.py
GFFReader.contains
def contains(self, seqid, start, end, overlap=True): """This returns a list of GFF objects which cover a specified location. :param seqid: The landmark identifier (usually a chromosome) :param start: The 1-based position of the start of the range we are querying :param end: The 1-based position of the end of the range we are querying :param overlap: A boolean value, if true we allow features to overlap the query range. For instance, overlap=True with the range (5,10), will return a GFF object spanning from (8,15). overlap=False will only return objects fully containing the range. :return: A list of GFF objects """ d = self.positions.get(seqid,[]) if overlap: return [gff_object for gff_start, gff_end in d for gff_object in d[(gff_start, gff_end)] if not (end <= gff_start or start >= gff_end)] else: return [gff_object for gff_start, gff_end in d for gff_object in d[(gff_start, gff_end)] if (gff_start <= start and gff_end >= end)]
python
def contains(self, seqid, start, end, overlap=True): """This returns a list of GFF objects which cover a specified location. :param seqid: The landmark identifier (usually a chromosome) :param start: The 1-based position of the start of the range we are querying :param end: The 1-based position of the end of the range we are querying :param overlap: A boolean value, if true we allow features to overlap the query range. For instance, overlap=True with the range (5,10), will return a GFF object spanning from (8,15). overlap=False will only return objects fully containing the range. :return: A list of GFF objects """ d = self.positions.get(seqid,[]) if overlap: return [gff_object for gff_start, gff_end in d for gff_object in d[(gff_start, gff_end)] if not (end <= gff_start or start >= gff_end)] else: return [gff_object for gff_start, gff_end in d for gff_object in d[(gff_start, gff_end)] if (gff_start <= start and gff_end >= end)]
[ "def", "contains", "(", "self", ",", "seqid", ",", "start", ",", "end", ",", "overlap", "=", "True", ")", ":", "d", "=", "self", ".", "positions", ".", "get", "(", "seqid", ",", "[", "]", ")", "if", "overlap", ":", "return", "[", "gff_object", "f...
This returns a list of GFF objects which cover a specified location. :param seqid: The landmark identifier (usually a chromosome) :param start: The 1-based position of the start of the range we are querying :param end: The 1-based position of the end of the range we are querying :param overlap: A boolean value, if true we allow features to overlap the query range. For instance, overlap=True with the range (5,10), will return a GFF object spanning from (8,15). overlap=False will only return objects fully containing the range. :return: A list of GFF objects
[ "This", "returns", "a", "list", "of", "GFF", "objects", "which", "cover", "a", "specified", "location", "." ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/parsers.py#L232-L251
train
45,736
pandeylab/pythomics
pythomics/genomics/parsers.py
VCFReader.contains
def contains(self, chrom, start, end, overlap=True): """This returns a list of VCFEntry objects which cover a specified location. :param chrom: The landmark identifier (usually a chromosome) :param start: The 1-based position of the start of the range we are querying :param end: The 1-based position of the end of the range we are querying :param overlap: A boolean value, if true we allow features to overlap the query range. For instance, overlap=True with the range (5,10), will return a VCFEntry object spanning from (8,15). overlap=False will only return objects fully containing the range. :return: A list of VCFEntry objects """ d = self.positions.get(chrom,[]) if overlap: return [vcf_entry for vcf_start, vcf_end in d for vcf_entry in d[(vcf_start, vcf_end)] if not (end < vcf_start or start > vcf_end)] else: return [vcf_entry for vcf_start, vcf_end in d for vcf_entry in d[(vcf_start, vcf_end)] if (vcf_start <= start and vcf_end >= end)]
python
def contains(self, chrom, start, end, overlap=True): """This returns a list of VCFEntry objects which cover a specified location. :param chrom: The landmark identifier (usually a chromosome) :param start: The 1-based position of the start of the range we are querying :param end: The 1-based position of the end of the range we are querying :param overlap: A boolean value, if true we allow features to overlap the query range. For instance, overlap=True with the range (5,10), will return a VCFEntry object spanning from (8,15). overlap=False will only return objects fully containing the range. :return: A list of VCFEntry objects """ d = self.positions.get(chrom,[]) if overlap: return [vcf_entry for vcf_start, vcf_end in d for vcf_entry in d[(vcf_start, vcf_end)] if not (end < vcf_start or start > vcf_end)] else: return [vcf_entry for vcf_start, vcf_end in d for vcf_entry in d[(vcf_start, vcf_end)] if (vcf_start <= start and vcf_end >= end)]
[ "def", "contains", "(", "self", ",", "chrom", ",", "start", ",", "end", ",", "overlap", "=", "True", ")", ":", "d", "=", "self", ".", "positions", ".", "get", "(", "chrom", ",", "[", "]", ")", "if", "overlap", ":", "return", "[", "vcf_entry", "fo...
This returns a list of VCFEntry objects which cover a specified location. :param chrom: The landmark identifier (usually a chromosome) :param start: The 1-based position of the start of the range we are querying :param end: The 1-based position of the end of the range we are querying :param overlap: A boolean value, if true we allow features to overlap the query range. For instance, overlap=True with the range (5,10), will return a VCFEntry object spanning from (8,15). overlap=False will only return objects fully containing the range. :return: A list of VCFEntry objects
[ "This", "returns", "a", "list", "of", "VCFEntry", "objects", "which", "cover", "a", "specified", "location", "." ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/parsers.py#L305-L324
train
45,737
pandeylab/pythomics
pythomics/genomics/parsers.py
VCFReader.remove_variants
def remove_variants(self, variants): """Remove a list of variants from the positions we are scanning""" chroms = set([i.chrom for i in variants]) for chrom in chroms: if self.append_chromosome: chrom = 'chr%s' % chrom to_delete = [pos for pos in self.positions[chrom] if pos in variants] for pos in to_delete: del self.positions[chrom][pos]
python
def remove_variants(self, variants): """Remove a list of variants from the positions we are scanning""" chroms = set([i.chrom for i in variants]) for chrom in chroms: if self.append_chromosome: chrom = 'chr%s' % chrom to_delete = [pos for pos in self.positions[chrom] if pos in variants] for pos in to_delete: del self.positions[chrom][pos]
[ "def", "remove_variants", "(", "self", ",", "variants", ")", ":", "chroms", "=", "set", "(", "[", "i", ".", "chrom", "for", "i", "in", "variants", "]", ")", "for", "chrom", "in", "chroms", ":", "if", "self", ".", "append_chromosome", ":", "chrom", "=...
Remove a list of variants from the positions we are scanning
[ "Remove", "a", "list", "of", "variants", "from", "the", "positions", "we", "are", "scanning" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/parsers.py#L326-L334
train
45,738
calmjs/calmjs
src/calmjs/loaderplugin.py
LoaderPluginHandler.generate_handler_sourcepath
def generate_handler_sourcepath( self, toolchain, spec, loaderplugin_sourcepath): """ The default implementation is a recursive lookup method, which subclasses may make use of. Subclasses must implement this to return a mapping of modnames the the absolute path of the desired sourcefiles. Example: return { 'text': '/tmp/src/example_module/text/index.js', 'json': '/tmp/src/example_module/json/index.js', } Subclasses of this implementation must accept the same arguments, and they should invoke this implementation via super and merge its results (e.g. using dict.update) with one provided by this one. Also, this implementation depends on a correct unwrap implementation for the loaderplugin at hand, if required. """ # since the loaderplugin_sourcepath values is the complete # modpath with the loader plugin, the values must be stripped # before making use of the filtering helper function for # grouping the inner mappings fake_spec = {} registry = spec.get(CALMJS_LOADERPLUGIN_REGISTRY) if registry: fake_spec[CALMJS_LOADERPLUGIN_REGISTRY] = registry spec_update_sourcepath_filter_loaderplugins(fake_spec, { self.unwrap(k): v for k, v in loaderplugin_sourcepath.items() }, 'current', 'nested') result = {} for plugin_name, sourcepath in fake_spec['nested'].items(): if sourcepath == loaderplugin_sourcepath: logger.warning( "loaderplugin '%s' extracted same sourcepath of while " "locating chain loaders: %s; skipping", self.name, sourcepath ) continue plugin = self.registry.get_record(plugin_name) if not plugin: logger.warning( "loaderplugin '%s' from registry '%s' cannot find " "sibling loaderplugin handler for '%s'; processing " "may fail for the following nested/chained sources: " "%s", self.name, self.registry.registry_name, plugin_name, sourcepath, ) continue result.update(plugin.generate_handler_sourcepath( toolchain, spec, sourcepath)) return result
python
def generate_handler_sourcepath( self, toolchain, spec, loaderplugin_sourcepath): """ The default implementation is a recursive lookup method, which subclasses may make use of. Subclasses must implement this to return a mapping of modnames the the absolute path of the desired sourcefiles. Example: return { 'text': '/tmp/src/example_module/text/index.js', 'json': '/tmp/src/example_module/json/index.js', } Subclasses of this implementation must accept the same arguments, and they should invoke this implementation via super and merge its results (e.g. using dict.update) with one provided by this one. Also, this implementation depends on a correct unwrap implementation for the loaderplugin at hand, if required. """ # since the loaderplugin_sourcepath values is the complete # modpath with the loader plugin, the values must be stripped # before making use of the filtering helper function for # grouping the inner mappings fake_spec = {} registry = spec.get(CALMJS_LOADERPLUGIN_REGISTRY) if registry: fake_spec[CALMJS_LOADERPLUGIN_REGISTRY] = registry spec_update_sourcepath_filter_loaderplugins(fake_spec, { self.unwrap(k): v for k, v in loaderplugin_sourcepath.items() }, 'current', 'nested') result = {} for plugin_name, sourcepath in fake_spec['nested'].items(): if sourcepath == loaderplugin_sourcepath: logger.warning( "loaderplugin '%s' extracted same sourcepath of while " "locating chain loaders: %s; skipping", self.name, sourcepath ) continue plugin = self.registry.get_record(plugin_name) if not plugin: logger.warning( "loaderplugin '%s' from registry '%s' cannot find " "sibling loaderplugin handler for '%s'; processing " "may fail for the following nested/chained sources: " "%s", self.name, self.registry.registry_name, plugin_name, sourcepath, ) continue result.update(plugin.generate_handler_sourcepath( toolchain, spec, sourcepath)) return result
[ "def", "generate_handler_sourcepath", "(", "self", ",", "toolchain", ",", "spec", ",", "loaderplugin_sourcepath", ")", ":", "# since the loaderplugin_sourcepath values is the complete", "# modpath with the loader plugin, the values must be stripped", "# before making use of the filtering...
The default implementation is a recursive lookup method, which subclasses may make use of. Subclasses must implement this to return a mapping of modnames the the absolute path of the desired sourcefiles. Example: return { 'text': '/tmp/src/example_module/text/index.js', 'json': '/tmp/src/example_module/json/index.js', } Subclasses of this implementation must accept the same arguments, and they should invoke this implementation via super and merge its results (e.g. using dict.update) with one provided by this one. Also, this implementation depends on a correct unwrap implementation for the loaderplugin at hand, if required.
[ "The", "default", "implementation", "is", "a", "recursive", "lookup", "method", "which", "subclasses", "may", "make", "use", "of", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/loaderplugin.py#L49-L104
train
45,739
calmjs/calmjs
src/calmjs/loaderplugin.py
NPMLoaderPluginHandler.generate_handler_sourcepath
def generate_handler_sourcepath( self, toolchain, spec, loaderplugin_sourcepath): """ Attempt to locate the plugin source; returns a mapping of modnames to the absolute path of the located sources. """ # TODO calmjs-4.0.0 consider formalizing to the method instead npm_pkg_name = ( self.node_module_pkg_name if self.node_module_pkg_name else self.find_node_module_pkg_name(toolchain, spec) ) if not npm_pkg_name: cls = type(self) registry_name = getattr( self.registry, 'registry_name', '<invalid_registry/handler>') if cls is NPMLoaderPluginHandler: logger.error( "no npm package name specified or could be resolved for " "loaderplugin '%s' of registry '%s'; please subclass " "%s:%s such that the npm package name become specified", self.name, registry_name, cls.__module__, cls.__name__, ) else: logger.error( "no npm package name specified or could be resolved for " "loaderplugin '%s' of registry '%s'; implementation of " "%s:%s may be at fault", self.name, registry_name, cls.__module__, cls.__name__, ) return {} working_dir = spec.get(WORKING_DIR, None) if working_dir is None: logger.info( "attempting to derive working directory using %s, as the " "provided spec is missing working_dir", toolchain ) working_dir = toolchain.join_cwd() logger.debug("deriving npm loader plugin from '%s'", working_dir) target = locate_package_entry_file(working_dir, npm_pkg_name) if target: logger.debug('picked %r for loader plugin %r', target, self.name) # use the parent recursive lookup. result = super( NPMLoaderPluginHandler, self).generate_handler_sourcepath( toolchain, spec, loaderplugin_sourcepath) result.update({self.name: target}) return result # the expected package file is not found, use the logger to show # why. # Also note that any inner/chained loaders will be dropped. 
if exists(join( working_dir, 'node_modules', npm_pkg_name, 'package.json')): logger.warning( "'package.json' for the npm package '%s' does not contain a " "valid entry point: sources required for loader plugin '%s' " "cannot be included automatically; the build process may fail", npm_pkg_name, self.name, ) else: logger.warning( "could not locate 'package.json' for the npm package '%s' " "which was specified to contain the loader plugin '%s' in the " "current working directory '%s'; the missing package may " "be installed by running 'npm install %s' for the mean time " "as a workaround, though the package that owns that source " "file that has this requirement should declare an explicit " "dependency; the build process may fail", npm_pkg_name, self.name, working_dir, npm_pkg_name, ) return {}
python
def generate_handler_sourcepath( self, toolchain, spec, loaderplugin_sourcepath): """ Attempt to locate the plugin source; returns a mapping of modnames to the absolute path of the located sources. """ # TODO calmjs-4.0.0 consider formalizing to the method instead npm_pkg_name = ( self.node_module_pkg_name if self.node_module_pkg_name else self.find_node_module_pkg_name(toolchain, spec) ) if not npm_pkg_name: cls = type(self) registry_name = getattr( self.registry, 'registry_name', '<invalid_registry/handler>') if cls is NPMLoaderPluginHandler: logger.error( "no npm package name specified or could be resolved for " "loaderplugin '%s' of registry '%s'; please subclass " "%s:%s such that the npm package name become specified", self.name, registry_name, cls.__module__, cls.__name__, ) else: logger.error( "no npm package name specified or could be resolved for " "loaderplugin '%s' of registry '%s'; implementation of " "%s:%s may be at fault", self.name, registry_name, cls.__module__, cls.__name__, ) return {} working_dir = spec.get(WORKING_DIR, None) if working_dir is None: logger.info( "attempting to derive working directory using %s, as the " "provided spec is missing working_dir", toolchain ) working_dir = toolchain.join_cwd() logger.debug("deriving npm loader plugin from '%s'", working_dir) target = locate_package_entry_file(working_dir, npm_pkg_name) if target: logger.debug('picked %r for loader plugin %r', target, self.name) # use the parent recursive lookup. result = super( NPMLoaderPluginHandler, self).generate_handler_sourcepath( toolchain, spec, loaderplugin_sourcepath) result.update({self.name: target}) return result # the expected package file is not found, use the logger to show # why. # Also note that any inner/chained loaders will be dropped. 
if exists(join( working_dir, 'node_modules', npm_pkg_name, 'package.json')): logger.warning( "'package.json' for the npm package '%s' does not contain a " "valid entry point: sources required for loader plugin '%s' " "cannot be included automatically; the build process may fail", npm_pkg_name, self.name, ) else: logger.warning( "could not locate 'package.json' for the npm package '%s' " "which was specified to contain the loader plugin '%s' in the " "current working directory '%s'; the missing package may " "be installed by running 'npm install %s' for the mean time " "as a workaround, though the package that owns that source " "file that has this requirement should declare an explicit " "dependency; the build process may fail", npm_pkg_name, self.name, working_dir, npm_pkg_name, ) return {}
[ "def", "generate_handler_sourcepath", "(", "self", ",", "toolchain", ",", "spec", ",", "loaderplugin_sourcepath", ")", ":", "# TODO calmjs-4.0.0 consider formalizing to the method instead", "npm_pkg_name", "=", "(", "self", ".", "node_module_pkg_name", "if", "self", ".", ...
Attempt to locate the plugin source; returns a mapping of modnames to the absolute path of the located sources.
[ "Attempt", "to", "locate", "the", "plugin", "source", ";", "returns", "a", "mapping", "of", "modnames", "to", "the", "absolute", "path", "of", "the", "located", "sources", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/loaderplugin.py#L126-L205
train
45,740
weso/CWR-DataApi
cwr/parser/encoder/file.py
BaseCWRFileNameEncoder.encode
def encode(self, tag): """ Parses a CWR file name from a FileTag object. The result will be a string following the format CWyynnnnsss_rrr.Vxx, where the numeric sequence will have the length set on the encoder's constructor. :param tag: FileTag to parse :return: a string file name parsed from the FileTag """ # Acquires sequence number sequence = str(tag.sequence_n) # If the sequence is bigger the max, it is cut if len(sequence) > self._sequence_l: sequence = sequence[:self._sequence_l] # If the sequence is smaller the max, it is padded with zeroes while len(sequence) < self._sequence_l: sequence = '0' + sequence # Acquires version version = str(tag.version) # If the version is too long only the first and last number are taken, # to remove decimal separator if len(version) > 2: version = version[:1] + version[-1:] # If the version is too short, it is padded with zeroes while len(version) < 2: version = '0' + version # Acquires year # Only the two last digits of the year are used year = str(tag.year)[-2:] # Acquires sender and receiver sender = tag.sender[:3] receiver = tag.receiver[:3] rule = self._header + year + sequence + sender rule = rule + self._ip_delimiter + receiver + ".V" + version return rule
python
def encode(self, tag): """ Parses a CWR file name from a FileTag object. The result will be a string following the format CWyynnnnsss_rrr.Vxx, where the numeric sequence will have the length set on the encoder's constructor. :param tag: FileTag to parse :return: a string file name parsed from the FileTag """ # Acquires sequence number sequence = str(tag.sequence_n) # If the sequence is bigger the max, it is cut if len(sequence) > self._sequence_l: sequence = sequence[:self._sequence_l] # If the sequence is smaller the max, it is padded with zeroes while len(sequence) < self._sequence_l: sequence = '0' + sequence # Acquires version version = str(tag.version) # If the version is too long only the first and last number are taken, # to remove decimal separator if len(version) > 2: version = version[:1] + version[-1:] # If the version is too short, it is padded with zeroes while len(version) < 2: version = '0' + version # Acquires year # Only the two last digits of the year are used year = str(tag.year)[-2:] # Acquires sender and receiver sender = tag.sender[:3] receiver = tag.receiver[:3] rule = self._header + year + sequence + sender rule = rule + self._ip_delimiter + receiver + ".V" + version return rule
[ "def", "encode", "(", "self", ",", "tag", ")", ":", "# Acquires sequence number", "sequence", "=", "str", "(", "tag", ".", "sequence_n", ")", "# If the sequence is bigger the max, it is cut", "if", "len", "(", "sequence", ")", ">", "self", ".", "_sequence_l", ":...
Parses a CWR file name from a FileTag object. The result will be a string following the format CWyynnnnsss_rrr.Vxx, where the numeric sequence will have the length set on the encoder's constructor. :param tag: FileTag to parse :return: a string file name parsed from the FileTag
[ "Parses", "a", "CWR", "file", "name", "from", "a", "FileTag", "object", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/parser/encoder/file.py#L100-L145
train
45,741
weso/CWR-DataApi
cwr/parser/encoder/file.py
CwrFileEncoder.encode
def encode(self, transmission): """ Encodes the data, creating a CWR structure from an instance from the domain model. :param entity: the instance to encode :return: a cwr string structure created from the received data """ data = '' data += self._record_encode(transmission.header) for group in transmission.groups: data += self._record_encode(group.group_header) for transaction in group.transactions: for record in transaction: data += self._record_encode(record) data += self._record_encode(group.group_trailer) data += self._record_encode(transmission.trailer) return data
python
def encode(self, transmission): """ Encodes the data, creating a CWR structure from an instance from the domain model. :param entity: the instance to encode :return: a cwr string structure created from the received data """ data = '' data += self._record_encode(transmission.header) for group in transmission.groups: data += self._record_encode(group.group_header) for transaction in group.transactions: for record in transaction: data += self._record_encode(record) data += self._record_encode(group.group_trailer) data += self._record_encode(transmission.trailer) return data
[ "def", "encode", "(", "self", ",", "transmission", ")", ":", "data", "=", "''", "data", "+=", "self", ".", "_record_encode", "(", "transmission", ".", "header", ")", "for", "group", "in", "transmission", ".", "groups", ":", "data", "+=", "self", ".", "...
Encodes the data, creating a CWR structure from an instance from the domain model. :param entity: the instance to encode :return: a cwr string structure created from the received data
[ "Encodes", "the", "data", "creating", "a", "CWR", "structure", "from", "an", "instance", "from", "the", "domain", "model", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/parser/encoder/file.py#L167-L184
train
45,742
pandeylab/pythomics
pythomics/proteomics/parsers.py
MGFIterator.getScan
def getScan(self, title, peptide=None): """ allows random lookup """ if self.ra.has_key(title): self.filename.seek(self.ra[title][0],0) toRead = self.ra[title][1]-self.ra[title][0] info = self.filename.read(toRead) scan = self.parseScan(info) else: return None return scan
python
def getScan(self, title, peptide=None): """ allows random lookup """ if self.ra.has_key(title): self.filename.seek(self.ra[title][0],0) toRead = self.ra[title][1]-self.ra[title][0] info = self.filename.read(toRead) scan = self.parseScan(info) else: return None return scan
[ "def", "getScan", "(", "self", ",", "title", ",", "peptide", "=", "None", ")", ":", "if", "self", ".", "ra", ".", "has_key", "(", "title", ")", ":", "self", ".", "filename", ".", "seek", "(", "self", ".", "ra", "[", "title", "]", "[", "0", "]",...
allows random lookup
[ "allows", "random", "lookup" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/proteomics/parsers.py#L962-L973
train
45,743
pandeylab/pythomics
pythomics/proteomics/parsers.py
MGFIterator.parseScan
def parseScan(self, scan): """ All input follows the BEGIN IONS row and ends before END IONS """ setupScan = True foundCharge = False foundMass = False foundTitle = False scanObj = ScanObject() scanObj.ms_level = 2 for row in scan.split('\n'): if not row: continue entry = row.strip().split('=') if len(entry) >= 2: if entry[0] == 'PEPMASS': scanObj.mass = float(entry[1]) foundMass = True elif entry[0] == 'CHARGE': scanObj.charge = entry[1] foundCharge = True elif entry[0] == 'TITLE': # if self.titleMap: # pos = entry[1].find(',') # title = self.titleMap[int(entry[1][:entry[1].find(',')])] # else: title = '='.join(entry[1:]) foundTitle = True scanObj.title = title scanObj.id = title elif entry[0] == 'RTINSECONDS': scanObj.rt = float(entry[1]) else: mz,intensity = self.scanSplit.split(row.strip()) scanObj.scans.append((float(mz),float(intensity))) if foundCharge and foundMass and foundTitle: return scanObj return None
python
def parseScan(self, scan): """ All input follows the BEGIN IONS row and ends before END IONS """ setupScan = True foundCharge = False foundMass = False foundTitle = False scanObj = ScanObject() scanObj.ms_level = 2 for row in scan.split('\n'): if not row: continue entry = row.strip().split('=') if len(entry) >= 2: if entry[0] == 'PEPMASS': scanObj.mass = float(entry[1]) foundMass = True elif entry[0] == 'CHARGE': scanObj.charge = entry[1] foundCharge = True elif entry[0] == 'TITLE': # if self.titleMap: # pos = entry[1].find(',') # title = self.titleMap[int(entry[1][:entry[1].find(',')])] # else: title = '='.join(entry[1:]) foundTitle = True scanObj.title = title scanObj.id = title elif entry[0] == 'RTINSECONDS': scanObj.rt = float(entry[1]) else: mz,intensity = self.scanSplit.split(row.strip()) scanObj.scans.append((float(mz),float(intensity))) if foundCharge and foundMass and foundTitle: return scanObj return None
[ "def", "parseScan", "(", "self", ",", "scan", ")", ":", "setupScan", "=", "True", "foundCharge", "=", "False", "foundMass", "=", "False", "foundTitle", "=", "False", "scanObj", "=", "ScanObject", "(", ")", "scanObj", ".", "ms_level", "=", "2", "for", "ro...
All input follows the BEGIN IONS row and ends before END IONS
[ "All", "input", "follows", "the", "BEGIN", "IONS", "row", "and", "ends", "before", "END", "IONS" ]
ab0a5651a2e02a25def4d277b35fa09d1631bfcb
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/proteomics/parsers.py#L975-L1012
train
45,744
calmjs/calmjs
src/calmjs/artifact.py
_cls_lookup_dist
def _cls_lookup_dist(cls): """ Attempt to resolve the distribution from the provided class in the most naive way - this assumes the Python module path to the class contains the name of the package that provided the module and class. """ frags = cls.__module__.split('.') for name in ('.'.join(frags[:x]) for x in range(len(frags), 0, -1)): dist = find_pkg_dist(name) if dist: return dist
python
def _cls_lookup_dist(cls): """ Attempt to resolve the distribution from the provided class in the most naive way - this assumes the Python module path to the class contains the name of the package that provided the module and class. """ frags = cls.__module__.split('.') for name in ('.'.join(frags[:x]) for x in range(len(frags), 0, -1)): dist = find_pkg_dist(name) if dist: return dist
[ "def", "_cls_lookup_dist", "(", "cls", ")", ":", "frags", "=", "cls", ".", "__module__", ".", "split", "(", "'.'", ")", "for", "name", "in", "(", "'.'", ".", "join", "(", "frags", "[", ":", "x", "]", ")", "for", "x", "in", "range", "(", "len", ...
Attempt to resolve the distribution from the provided class in the most naive way - this assumes the Python module path to the class contains the name of the package that provided the module and class.
[ "Attempt", "to", "resolve", "the", "distribution", "from", "the", "provided", "class", "in", "the", "most", "naive", "way", "-", "this", "assumes", "the", "Python", "module", "path", "to", "the", "class", "contains", "the", "name", "of", "the", "package", ...
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L155-L166
train
45,745
calmjs/calmjs
src/calmjs/artifact.py
verify_builder
def verify_builder(builder): """ To ensure that the provided builder has a signature that is at least compatible. """ try: d = getcallargs(builder, package_names=[], export_target='some_path') except TypeError: return False return d == {'package_names': [], 'export_target': 'some_path'}
python
def verify_builder(builder): """ To ensure that the provided builder has a signature that is at least compatible. """ try: d = getcallargs(builder, package_names=[], export_target='some_path') except TypeError: return False return d == {'package_names': [], 'export_target': 'some_path'}
[ "def", "verify_builder", "(", "builder", ")", ":", "try", ":", "d", "=", "getcallargs", "(", "builder", ",", "package_names", "=", "[", "]", ",", "export_target", "=", "'some_path'", ")", "except", "TypeError", ":", "return", "False", "return", "d", "==", ...
To ensure that the provided builder has a signature that is at least compatible.
[ "To", "ensure", "that", "the", "provided", "builder", "has", "a", "signature", "that", "is", "at", "least", "compatible", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L169-L179
train
45,746
calmjs/calmjs
src/calmjs/artifact.py
extract_builder_result
def extract_builder_result(builder_result, toolchain_cls=Toolchain): """ Extract the builder result to produce a ``Toolchain`` and ``Spec`` instance. """ try: toolchain, spec = builder_result except Exception: return None, None if not isinstance(toolchain, toolchain_cls) or not isinstance(spec, Spec): return None, None return toolchain, spec
python
def extract_builder_result(builder_result, toolchain_cls=Toolchain): """ Extract the builder result to produce a ``Toolchain`` and ``Spec`` instance. """ try: toolchain, spec = builder_result except Exception: return None, None if not isinstance(toolchain, toolchain_cls) or not isinstance(spec, Spec): return None, None return toolchain, spec
[ "def", "extract_builder_result", "(", "builder_result", ",", "toolchain_cls", "=", "Toolchain", ")", ":", "try", ":", "toolchain", ",", "spec", "=", "builder_result", "except", "Exception", ":", "return", "None", ",", "None", "if", "not", "isinstance", "(", "t...
Extract the builder result to produce a ``Toolchain`` and ``Spec`` instance.
[ "Extract", "the", "builder", "result", "to", "produce", "a", "Toolchain", "and", "Spec", "instance", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L182-L194
train
45,747
calmjs/calmjs
src/calmjs/artifact.py
trace_toolchain
def trace_toolchain(toolchain): """ Trace the versions of the involved packages for the provided toolchain instance. """ pkgs = [] for cls in getmro(type(toolchain)): if not issubclass(cls, Toolchain): continue dist = _cls_lookup_dist(cls) value = { 'project_name': dist.project_name, 'version': dist.version, } if dist else {} key = '%s:%s' % (cls.__module__, cls.__name__) pkgs.append({key: value}) return pkgs
python
def trace_toolchain(toolchain): """ Trace the versions of the involved packages for the provided toolchain instance. """ pkgs = [] for cls in getmro(type(toolchain)): if not issubclass(cls, Toolchain): continue dist = _cls_lookup_dist(cls) value = { 'project_name': dist.project_name, 'version': dist.version, } if dist else {} key = '%s:%s' % (cls.__module__, cls.__name__) pkgs.append({key: value}) return pkgs
[ "def", "trace_toolchain", "(", "toolchain", ")", ":", "pkgs", "=", "[", "]", "for", "cls", "in", "getmro", "(", "type", "(", "toolchain", ")", ")", ":", "if", "not", "issubclass", "(", "cls", ",", "Toolchain", ")", ":", "continue", "dist", "=", "_cls...
Trace the versions of the involved packages for the provided toolchain instance.
[ "Trace", "the", "versions", "of", "the", "involved", "packages", "for", "the", "provided", "toolchain", "instance", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L197-L214
train
45,748
calmjs/calmjs
src/calmjs/artifact.py
BaseArtifactRegistry.get_artifact_filename
def get_artifact_filename(self, package_name, artifact_name): """ Similar to pkg_resources.resource_filename, however this works with the information cached in this registry instance, and arguments are not quite the same. Arguments: package_name The name of the package to get the artifact from artifact_name The exact name of the artifact. Returns the path of where the artifact should be if it has been declared, otherwise None. """ project_name = self.packages.normalize(package_name) return self.records.get((project_name, artifact_name))
python
def get_artifact_filename(self, package_name, artifact_name): """ Similar to pkg_resources.resource_filename, however this works with the information cached in this registry instance, and arguments are not quite the same. Arguments: package_name The name of the package to get the artifact from artifact_name The exact name of the artifact. Returns the path of where the artifact should be if it has been declared, otherwise None. """ project_name = self.packages.normalize(package_name) return self.records.get((project_name, artifact_name))
[ "def", "get_artifact_filename", "(", "self", ",", "package_name", ",", "artifact_name", ")", ":", "project_name", "=", "self", ".", "packages", ".", "normalize", "(", "package_name", ")", "return", "self", ".", "records", ".", "get", "(", "(", "project_name", ...
Similar to pkg_resources.resource_filename, however this works with the information cached in this registry instance, and arguments are not quite the same. Arguments: package_name The name of the package to get the artifact from artifact_name The exact name of the artifact. Returns the path of where the artifact should be if it has been declared, otherwise None.
[ "Similar", "to", "pkg_resources", ".", "resource_filename", "however", "this", "works", "with", "the", "information", "cached", "in", "this", "registry", "instance", "and", "arguments", "are", "not", "quite", "the", "same", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L344-L362
train
45,749
calmjs/calmjs
src/calmjs/artifact.py
BaseArtifactRegistry.resolve_artifacts_by_builder_compat
def resolve_artifacts_by_builder_compat( self, package_names, builder_name, dependencies=False): """ Yield the list of paths to the artifacts in the order of the dependency resolution Arguments: package_names The names of the packages to probe the dependency graph, to be provided as a list of strings. artifact_name The exact name of the artifact. dependencies Trace dependencies. Default is off. Returns the path of where the artifact should be if it has been declared, otherwise None. """ paths = self.compat_builders.get(builder_name) if not paths: # perhaps warn, but just return return resolver = ( # traces dependencies for distribution. find_packages_requirements_dists if dependencies else # just get grabs the distribution. pkg_names_to_dists ) for distribution in resolver(package_names): path = paths.get(distribution.project_name) if path: yield path
python
def resolve_artifacts_by_builder_compat( self, package_names, builder_name, dependencies=False): """ Yield the list of paths to the artifacts in the order of the dependency resolution Arguments: package_names The names of the packages to probe the dependency graph, to be provided as a list of strings. artifact_name The exact name of the artifact. dependencies Trace dependencies. Default is off. Returns the path of where the artifact should be if it has been declared, otherwise None. """ paths = self.compat_builders.get(builder_name) if not paths: # perhaps warn, but just return return resolver = ( # traces dependencies for distribution. find_packages_requirements_dists if dependencies else # just get grabs the distribution. pkg_names_to_dists ) for distribution in resolver(package_names): path = paths.get(distribution.project_name) if path: yield path
[ "def", "resolve_artifacts_by_builder_compat", "(", "self", ",", "package_names", ",", "builder_name", ",", "dependencies", "=", "False", ")", ":", "paths", "=", "self", ".", "compat_builders", ".", "get", "(", "builder_name", ")", "if", "not", "paths", ":", "#...
Yield the list of paths to the artifacts in the order of the dependency resolution Arguments: package_names The names of the packages to probe the dependency graph, to be provided as a list of strings. artifact_name The exact name of the artifact. dependencies Trace dependencies. Default is off. Returns the path of where the artifact should be if it has been declared, otherwise None.
[ "Yield", "the", "list", "of", "paths", "to", "the", "artifacts", "in", "the", "order", "of", "the", "dependency", "resolution" ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L364-L399
train
45,750
calmjs/calmjs
src/calmjs/artifact.py
BaseArtifactRegistry.get_artifact_metadata
def get_artifact_metadata(self, package_name): """ Return metadata of the artifacts built through this registry. """ filename = self.metadata.get(package_name) if not filename or not exists(filename): return {} with open(filename, encoding='utf8') as fd: contents = fd.read() try: is_json_compat(contents) except ValueError: logger.info("artifact metadata file '%s' is invalid", filename) return {} return json.loads(contents)
python
def get_artifact_metadata(self, package_name): """ Return metadata of the artifacts built through this registry. """ filename = self.metadata.get(package_name) if not filename or not exists(filename): return {} with open(filename, encoding='utf8') as fd: contents = fd.read() try: is_json_compat(contents) except ValueError: logger.info("artifact metadata file '%s' is invalid", filename) return {} return json.loads(contents)
[ "def", "get_artifact_metadata", "(", "self", ",", "package_name", ")", ":", "filename", "=", "self", ".", "metadata", ".", "get", "(", "package_name", ")", "if", "not", "filename", "or", "not", "exists", "(", "filename", ")", ":", "return", "{", "}", "wi...
Return metadata of the artifacts built through this registry.
[ "Return", "metadata", "of", "the", "artifacts", "built", "through", "this", "registry", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L401-L418
train
45,751
calmjs/calmjs
src/calmjs/artifact.py
BaseArtifactRegistry.generate_metadata_entry
def generate_metadata_entry(self, entry_point, toolchain, spec): """ After the toolchain and spec have been executed, this may be called to generate the artifact export entry for persistence into the metadata file. """ export_target = spec['export_target'] toolchain_bases = trace_toolchain(toolchain) toolchain_bin_path = spec.get(TOOLCHAIN_BIN_PATH) toolchain_bin = ([ basename(toolchain_bin_path), # bin_name get_bin_version_str(toolchain_bin_path), # bin_version ] if toolchain_bin_path else []) return {basename(export_target): { 'toolchain_bases': toolchain_bases, 'toolchain_bin': toolchain_bin, 'builder': '%s:%s' % ( entry_point.module_name, '.'.join(entry_point.attrs)), }}
python
def generate_metadata_entry(self, entry_point, toolchain, spec): """ After the toolchain and spec have been executed, this may be called to generate the artifact export entry for persistence into the metadata file. """ export_target = spec['export_target'] toolchain_bases = trace_toolchain(toolchain) toolchain_bin_path = spec.get(TOOLCHAIN_BIN_PATH) toolchain_bin = ([ basename(toolchain_bin_path), # bin_name get_bin_version_str(toolchain_bin_path), # bin_version ] if toolchain_bin_path else []) return {basename(export_target): { 'toolchain_bases': toolchain_bases, 'toolchain_bin': toolchain_bin, 'builder': '%s:%s' % ( entry_point.module_name, '.'.join(entry_point.attrs)), }}
[ "def", "generate_metadata_entry", "(", "self", ",", "entry_point", ",", "toolchain", ",", "spec", ")", ":", "export_target", "=", "spec", "[", "'export_target'", "]", "toolchain_bases", "=", "trace_toolchain", "(", "toolchain", ")", "toolchain_bin_path", "=", "spe...
After the toolchain and spec have been executed, this may be called to generate the artifact export entry for persistence into the metadata file.
[ "After", "the", "toolchain", "and", "spec", "have", "been", "executed", "this", "may", "be", "called", "to", "generate", "the", "artifact", "export", "entry", "for", "persistence", "into", "the", "metadata", "file", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L420-L440
train
45,752
calmjs/calmjs
src/calmjs/artifact.py
BaseArtifactRegistry.iter_records_for
def iter_records_for(self, package_name): """ Iterate records for a specific package. """ entry_points = self.packages.get(package_name, NotImplemented) if entry_points is NotImplemented: logger.debug( "package '%s' has not declared any entry points for the '%s' " "registry for artifact construction", package_name, self.registry_name, ) return iter([]) logger.debug( "package '%s' has declared %d entry points for the '%s' " "registry for artifact construction", package_name, len(entry_points), self.registry_name, ) return iter(entry_points.values())
python
def iter_records_for(self, package_name): """ Iterate records for a specific package. """ entry_points = self.packages.get(package_name, NotImplemented) if entry_points is NotImplemented: logger.debug( "package '%s' has not declared any entry points for the '%s' " "registry for artifact construction", package_name, self.registry_name, ) return iter([]) logger.debug( "package '%s' has declared %d entry points for the '%s' " "registry for artifact construction", package_name, len(entry_points), self.registry_name, ) return iter(entry_points.values())
[ "def", "iter_records_for", "(", "self", ",", "package_name", ")", ":", "entry_points", "=", "self", ".", "packages", ".", "get", "(", "package_name", ",", "NotImplemented", ")", "if", "entry_points", "is", "NotImplemented", ":", "logger", ".", "debug", "(", ...
Iterate records for a specific package.
[ "Iterate", "records", "for", "a", "specific", "package", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L461-L480
train
45,753
calmjs/calmjs
src/calmjs/artifact.py
BaseArtifactRegistry.generate_builder
def generate_builder(self, entry_point, export_target): """ Yields exactly one builder if both the provided entry point and export target satisfies the checks required. """ try: builder = entry_point.resolve() except ImportError: logger.error( "unable to import the target builder for the entry point " "'%s' from package '%s' to generate artifact '%s'", entry_point, entry_point.dist, export_target, ) return if not self.verify_builder(builder): logger.error( "the builder referenced by the entry point '%s' " "from package '%s' has an incompatible signature", entry_point, entry_point.dist, ) return # CLEANUP see deprecation notice below verifier = self.verify_export_target(export_target) if not verifier: logger.error( "the export target '%s' has been rejected", export_target) return toolchain, spec = self.extract_builder_result(builder( [entry_point.dist.project_name], export_target=export_target)) if not toolchain: logger.error( "the builder referenced by the entry point '%s' " "from package '%s' failed to produce a valid " "toolchain", entry_point, entry_point.dist, ) return if spec.get(EXPORT_TARGET) != export_target: logger.error( "the builder referenced by the entry point '%s' " "from package '%s' failed to produce a spec with the " "expected export_target", entry_point, entry_point.dist, ) return if callable(verifier): warnings.warn( "%s:%s.verify_export_target returned a callable, which " "will no longer be passed to spec.advise by calmjs-4.0.0; " "please instead override 'setup_export_location' or " "'prepare_export_location' in that class" % ( self.__class__.__module__, self.__class__.__name__), DeprecationWarning ) spec.advise(BEFORE_PREPARE, verifier, export_target) else: spec.advise( BEFORE_PREPARE, self.prepare_export_location, export_target) yield entry_point, toolchain, spec
python
def generate_builder(self, entry_point, export_target): """ Yields exactly one builder if both the provided entry point and export target satisfies the checks required. """ try: builder = entry_point.resolve() except ImportError: logger.error( "unable to import the target builder for the entry point " "'%s' from package '%s' to generate artifact '%s'", entry_point, entry_point.dist, export_target, ) return if not self.verify_builder(builder): logger.error( "the builder referenced by the entry point '%s' " "from package '%s' has an incompatible signature", entry_point, entry_point.dist, ) return # CLEANUP see deprecation notice below verifier = self.verify_export_target(export_target) if not verifier: logger.error( "the export target '%s' has been rejected", export_target) return toolchain, spec = self.extract_builder_result(builder( [entry_point.dist.project_name], export_target=export_target)) if not toolchain: logger.error( "the builder referenced by the entry point '%s' " "from package '%s' failed to produce a valid " "toolchain", entry_point, entry_point.dist, ) return if spec.get(EXPORT_TARGET) != export_target: logger.error( "the builder referenced by the entry point '%s' " "from package '%s' failed to produce a spec with the " "expected export_target", entry_point, entry_point.dist, ) return if callable(verifier): warnings.warn( "%s:%s.verify_export_target returned a callable, which " "will no longer be passed to spec.advise by calmjs-4.0.0; " "please instead override 'setup_export_location' or " "'prepare_export_location' in that class" % ( self.__class__.__module__, self.__class__.__name__), DeprecationWarning ) spec.advise(BEFORE_PREPARE, verifier, export_target) else: spec.advise( BEFORE_PREPARE, self.prepare_export_location, export_target) yield entry_point, toolchain, spec
[ "def", "generate_builder", "(", "self", ",", "entry_point", ",", "export_target", ")", ":", "try", ":", "builder", "=", "entry_point", ".", "resolve", "(", ")", "except", "ImportError", ":", "logger", ".", "error", "(", "\"unable to import the target builder for t...
Yields exactly one builder if both the provided entry point and export target satisfies the checks required.
[ "Yields", "exactly", "one", "builder", "if", "both", "the", "provided", "entry", "point", "and", "export", "target", "satisfies", "the", "checks", "required", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L520-L585
train
45,754
calmjs/calmjs
src/calmjs/artifact.py
BaseArtifactRegistry.execute_builder
def execute_builder(self, entry_point, toolchain, spec): """ Accepts the arguments provided by the builder and executes them. """ toolchain(spec) if not exists(spec['export_target']): logger.error( "the entry point '%s' from package '%s' failed to " "generate an artifact at '%s'", entry_point, entry_point.dist, spec['export_target'] ) return {} return self.generate_metadata_entry(entry_point, toolchain, spec)
python
def execute_builder(self, entry_point, toolchain, spec): """ Accepts the arguments provided by the builder and executes them. """ toolchain(spec) if not exists(spec['export_target']): logger.error( "the entry point '%s' from package '%s' failed to " "generate an artifact at '%s'", entry_point, entry_point.dist, spec['export_target'] ) return {} return self.generate_metadata_entry(entry_point, toolchain, spec)
[ "def", "execute_builder", "(", "self", ",", "entry_point", ",", "toolchain", ",", "spec", ")", ":", "toolchain", "(", "spec", ")", "if", "not", "exists", "(", "spec", "[", "'export_target'", "]", ")", ":", "logger", ".", "error", "(", "\"the entry point '%...
Accepts the arguments provided by the builder and executes them.
[ "Accepts", "the", "arguments", "provided", "by", "the", "builder", "and", "executes", "them", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L594-L607
train
45,755
calmjs/calmjs
src/calmjs/artifact.py
ArtifactRegistry.process_package
def process_package(self, package_name): """ Build artifacts declared for the given package. """ metadata = super(ArtifactRegistry, self).process_package(package_name) if metadata: self.update_artifact_metadata(package_name, metadata)
python
def process_package(self, package_name): """ Build artifacts declared for the given package. """ metadata = super(ArtifactRegistry, self).process_package(package_name) if metadata: self.update_artifact_metadata(package_name, metadata)
[ "def", "process_package", "(", "self", ",", "package_name", ")", ":", "metadata", "=", "super", "(", "ArtifactRegistry", ",", "self", ")", ".", "process_package", "(", "package_name", ")", "if", "metadata", ":", "self", ".", "update_artifact_metadata", "(", "p...
Build artifacts declared for the given package.
[ "Build", "artifacts", "declared", "for", "the", "given", "package", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/artifact.py#L622-L629
train
45,756
weso/CWR-DataApi
cwr/grammar/field/filename.py
alphanum_variable
def alphanum_variable(min_size, max_size, name=None): """ Creates the grammar for an alphanumeric code where the size ranges between two values. :param min_size: minimum size :param max_size: maximum size :param name: name for the field :return: grammar for an alphanumeric field of a variable size """ if name is None: name = 'Alphanumeric Field' if min_size < 0: # Can't have negative min raise BaseException() if max_size < min_size: # Max can't be lower than min raise BaseException() field = pp.Word(pp.alphanums, min=min_size, max=max_size) # Parse action field.setParseAction(lambda s: s[0].strip()) # White spaces are not removed field.leaveWhitespace() # Name field.setName(name) return field
python
def alphanum_variable(min_size, max_size, name=None): """ Creates the grammar for an alphanumeric code where the size ranges between two values. :param min_size: minimum size :param max_size: maximum size :param name: name for the field :return: grammar for an alphanumeric field of a variable size """ if name is None: name = 'Alphanumeric Field' if min_size < 0: # Can't have negative min raise BaseException() if max_size < min_size: # Max can't be lower than min raise BaseException() field = pp.Word(pp.alphanums, min=min_size, max=max_size) # Parse action field.setParseAction(lambda s: s[0].strip()) # White spaces are not removed field.leaveWhitespace() # Name field.setName(name) return field
[ "def", "alphanum_variable", "(", "min_size", ",", "max_size", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "'Alphanumeric Field'", "if", "min_size", "<", "0", ":", "# Can't have negative min", "raise", "BaseException", "(",...
Creates the grammar for an alphanumeric code where the size ranges between two values. :param min_size: minimum size :param max_size: maximum size :param name: name for the field :return: grammar for an alphanumeric field of a variable size
[ "Creates", "the", "grammar", "for", "an", "alphanumeric", "code", "where", "the", "size", "ranges", "between", "two", "values", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/filename.py#L17-L49
train
45,757
weso/CWR-DataApi
cwr/grammar/field/filename.py
year
def year(columns, name=None): """ Creates the grammar for a field containing a year. :param columns: the number of columns for the year :param name: the name of the field :return: """ if columns < 0: # Can't have negative size raise BaseException() field = numeric(columns, name) # Parse action field.addParseAction(_to_year) return field
python
def year(columns, name=None): """ Creates the grammar for a field containing a year. :param columns: the number of columns for the year :param name: the name of the field :return: """ if columns < 0: # Can't have negative size raise BaseException() field = numeric(columns, name) # Parse action field.addParseAction(_to_year) return field
[ "def", "year", "(", "columns", ",", "name", "=", "None", ")", ":", "if", "columns", "<", "0", ":", "# Can't have negative size", "raise", "BaseException", "(", ")", "field", "=", "numeric", "(", "columns", ",", "name", ")", "# Parse action", "field", ".", ...
Creates the grammar for a field containing a year. :param columns: the number of columns for the year :param name: the name of the field :return:
[ "Creates", "the", "grammar", "for", "a", "field", "containing", "a", "year", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/filename.py#L52-L70
train
45,758
calmjs/calmjs
src/calmjs/dist.py
is_json_compat
def is_json_compat(value): """ Check that the value is either a JSON decodable string or a dict that can be encoded into a JSON. Raises ValueError when validation fails. """ try: value = json.loads(value) except ValueError as e: raise ValueError('JSON decoding error: ' + str(e)) except TypeError: # Check that the value can be serialized back into json. try: json.dumps(value) except TypeError as e: raise ValueError( 'must be a JSON serializable object: ' + str(e)) if not isinstance(value, dict): raise ValueError( 'must be specified as a JSON serializable dict or a ' 'JSON deserializable string' ) return True
python
def is_json_compat(value): """ Check that the value is either a JSON decodable string or a dict that can be encoded into a JSON. Raises ValueError when validation fails. """ try: value = json.loads(value) except ValueError as e: raise ValueError('JSON decoding error: ' + str(e)) except TypeError: # Check that the value can be serialized back into json. try: json.dumps(value) except TypeError as e: raise ValueError( 'must be a JSON serializable object: ' + str(e)) if not isinstance(value, dict): raise ValueError( 'must be specified as a JSON serializable dict or a ' 'JSON deserializable string' ) return True
[ "def", "is_json_compat", "(", "value", ")", ":", "try", ":", "value", "=", "json", ".", "loads", "(", "value", ")", "except", "ValueError", "as", "e", ":", "raise", "ValueError", "(", "'JSON decoding error: '", "+", "str", "(", "e", ")", ")", "except", ...
Check that the value is either a JSON decodable string or a dict that can be encoded into a JSON. Raises ValueError when validation fails.
[ "Check", "that", "the", "value", "is", "either", "a", "JSON", "decodable", "string", "or", "a", "dict", "that", "can", "be", "encoded", "into", "a", "JSON", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L37-L63
train
45,759
calmjs/calmjs
src/calmjs/dist.py
validate_json_field
def validate_json_field(dist, attr, value): """ Check for json validity. """ try: is_json_compat(value) except ValueError as e: raise DistutilsSetupError("%r %s" % (attr, e)) return True
python
def validate_json_field(dist, attr, value): """ Check for json validity. """ try: is_json_compat(value) except ValueError as e: raise DistutilsSetupError("%r %s" % (attr, e)) return True
[ "def", "validate_json_field", "(", "dist", ",", "attr", ",", "value", ")", ":", "try", ":", "is_json_compat", "(", "value", ")", "except", "ValueError", "as", "e", ":", "raise", "DistutilsSetupError", "(", "\"%r %s\"", "%", "(", "attr", ",", "e", ")", ")...
Check for json validity.
[ "Check", "for", "json", "validity", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L66-L76
train
45,760
calmjs/calmjs
src/calmjs/dist.py
validate_line_list
def validate_line_list(dist, attr, value): """ Validate that the value is compatible """ # does not work as reliably in Python 2. if isinstance(value, str): value = value.split() value = list(value) try: check = (' '.join(value)).split() if check == value: return True except Exception: pass raise DistutilsSetupError("%r must be a list of valid identifiers" % attr)
python
def validate_line_list(dist, attr, value): """ Validate that the value is compatible """ # does not work as reliably in Python 2. if isinstance(value, str): value = value.split() value = list(value) try: check = (' '.join(value)).split() if check == value: return True except Exception: pass raise DistutilsSetupError("%r must be a list of valid identifiers" % attr)
[ "def", "validate_line_list", "(", "dist", ",", "attr", ",", "value", ")", ":", "# does not work as reliably in Python 2.", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "value", ".", "split", "(", ")", "value", "=", "list", "(", "va...
Validate that the value is compatible
[ "Validate", "that", "the", "value", "is", "compatible" ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L79-L95
train
45,761
calmjs/calmjs
src/calmjs/dist.py
write_json_file
def write_json_file(argname, cmd, basename, filename): """ Write JSON captured from the defined argname into the package's egg-info directory using the specified filename. """ value = getattr(cmd.distribution, argname, None) if isinstance(value, dict): value = json.dumps( value, indent=4, sort_keys=True, separators=(',', ': ')) cmd.write_or_delete_file(argname, filename, value, force=True)
python
def write_json_file(argname, cmd, basename, filename): """ Write JSON captured from the defined argname into the package's egg-info directory using the specified filename. """ value = getattr(cmd.distribution, argname, None) if isinstance(value, dict): value = json.dumps( value, indent=4, sort_keys=True, separators=(',', ': ')) cmd.write_or_delete_file(argname, filename, value, force=True)
[ "def", "write_json_file", "(", "argname", ",", "cmd", ",", "basename", ",", "filename", ")", ":", "value", "=", "getattr", "(", "cmd", ".", "distribution", ",", "argname", ",", "None", ")", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "valu...
Write JSON captured from the defined argname into the package's egg-info directory using the specified filename.
[ "Write", "JSON", "captured", "from", "the", "defined", "argname", "into", "the", "package", "s", "egg", "-", "info", "directory", "using", "the", "specified", "filename", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L98-L110
train
45,762
calmjs/calmjs
src/calmjs/dist.py
write_line_list
def write_line_list(argname, cmd, basename, filename): """ Write out the retrieved value as list of lines. """ values = getattr(cmd.distribution, argname, None) if isinstance(values, list): values = '\n'.join(values) cmd.write_or_delete_file(argname, filename, values, force=True)
python
def write_line_list(argname, cmd, basename, filename): """ Write out the retrieved value as list of lines. """ values = getattr(cmd.distribution, argname, None) if isinstance(values, list): values = '\n'.join(values) cmd.write_or_delete_file(argname, filename, values, force=True)
[ "def", "write_line_list", "(", "argname", ",", "cmd", ",", "basename", ",", "filename", ")", ":", "values", "=", "getattr", "(", "cmd", ".", "distribution", ",", "argname", ",", "None", ")", "if", "isinstance", "(", "values", ",", "list", ")", ":", "va...
Write out the retrieved value as list of lines.
[ "Write", "out", "the", "retrieved", "value", "as", "list", "of", "lines", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L113-L121
train
45,763
calmjs/calmjs
src/calmjs/dist.py
find_pkg_dist
def find_pkg_dist(pkg_name, working_set=None): """ Locate a package's distribution by its name. """ working_set = working_set or default_working_set req = Requirement.parse(pkg_name) return working_set.find(req)
python
def find_pkg_dist(pkg_name, working_set=None): """ Locate a package's distribution by its name. """ working_set = working_set or default_working_set req = Requirement.parse(pkg_name) return working_set.find(req)
[ "def", "find_pkg_dist", "(", "pkg_name", ",", "working_set", "=", "None", ")", ":", "working_set", "=", "working_set", "or", "default_working_set", "req", "=", "Requirement", ".", "parse", "(", "pkg_name", ")", "return", "working_set", ".", "find", "(", "req",...
Locate a package's distribution by its name.
[ "Locate", "a", "package", "s", "distribution", "by", "its", "name", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L124-L131
train
45,764
calmjs/calmjs
src/calmjs/dist.py
convert_package_names
def convert_package_names(package_names): """ Convert package names, which can be a string of a number of package names or requirements separated by spaces. """ results = [] errors = [] for name in ( package_names.split() if hasattr(package_names, 'split') else package_names): try: Requirement.parse(name) except ValueError: errors.append(name) else: results.append(name) return results, errors
python
def convert_package_names(package_names): """ Convert package names, which can be a string of a number of package names or requirements separated by spaces. """ results = [] errors = [] for name in ( package_names.split() if hasattr(package_names, 'split') else package_names): try: Requirement.parse(name) except ValueError: errors.append(name) else: results.append(name) return results, errors
[ "def", "convert_package_names", "(", "package_names", ")", ":", "results", "=", "[", "]", "errors", "=", "[", "]", "for", "name", "in", "(", "package_names", ".", "split", "(", ")", "if", "hasattr", "(", "package_names", ",", "'split'", ")", "else", "pac...
Convert package names, which can be a string of a number of package names or requirements separated by spaces.
[ "Convert", "package", "names", "which", "can", "be", "a", "string", "of", "a", "number", "of", "package", "names", "or", "requirements", "separated", "by", "spaces", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L134-L153
train
45,765
calmjs/calmjs
src/calmjs/dist.py
find_packages_requirements_dists
def find_packages_requirements_dists(pkg_names, working_set=None): """ Return the entire list of dependency requirements, reversed from the bottom. """ working_set = working_set or default_working_set requirements = [ r for r in (Requirement.parse(req) for req in pkg_names) if working_set.find(r) ] return list(reversed(working_set.resolve(requirements)))
python
def find_packages_requirements_dists(pkg_names, working_set=None): """ Return the entire list of dependency requirements, reversed from the bottom. """ working_set = working_set or default_working_set requirements = [ r for r in (Requirement.parse(req) for req in pkg_names) if working_set.find(r) ] return list(reversed(working_set.resolve(requirements)))
[ "def", "find_packages_requirements_dists", "(", "pkg_names", ",", "working_set", "=", "None", ")", ":", "working_set", "=", "working_set", "or", "default_working_set", "requirements", "=", "[", "r", "for", "r", "in", "(", "Requirement", ".", "parse", "(", "req",...
Return the entire list of dependency requirements, reversed from the bottom.
[ "Return", "the", "entire", "list", "of", "dependency", "requirements", "reversed", "from", "the", "bottom", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L162-L173
train
45,766
calmjs/calmjs
src/calmjs/dist.py
find_packages_parents_requirements_dists
def find_packages_parents_requirements_dists(pkg_names, working_set=None): """ Leverages the `find_packages_requirements_dists` but strip out the distributions that matches pkg_names. """ dists = [] # opting for a naive implementation targets = set(pkg_names) for dist in find_packages_requirements_dists(pkg_names, working_set): if dist.project_name in targets: continue dists.append(dist) return dists
python
def find_packages_parents_requirements_dists(pkg_names, working_set=None): """ Leverages the `find_packages_requirements_dists` but strip out the distributions that matches pkg_names. """ dists = [] # opting for a naive implementation targets = set(pkg_names) for dist in find_packages_requirements_dists(pkg_names, working_set): if dist.project_name in targets: continue dists.append(dist) return dists
[ "def", "find_packages_parents_requirements_dists", "(", "pkg_names", ",", "working_set", "=", "None", ")", ":", "dists", "=", "[", "]", "# opting for a naive implementation", "targets", "=", "set", "(", "pkg_names", ")", "for", "dist", "in", "find_packages_requirement...
Leverages the `find_packages_requirements_dists` but strip out the distributions that matches pkg_names.
[ "Leverages", "the", "find_packages_requirements_dists", "but", "strip", "out", "the", "distributions", "that", "matches", "pkg_names", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L176-L189
train
45,767
calmjs/calmjs
src/calmjs/dist.py
read_dist_egginfo_json
def read_dist_egginfo_json(dist, filename=DEFAULT_JSON): """ Safely get a json within an egginfo from a distribution. """ # use the given package's distribution to acquire the json file. if not dist.has_metadata(filename): logger.debug("no '%s' for '%s'", filename, dist) return try: result = dist.get_metadata(filename) except IOError: logger.error("I/O error on reading of '%s' for '%s'.", filename, dist) return try: obj = json.loads(result) except (TypeError, ValueError): logger.error( "the '%s' found in '%s' is not a valid json.", filename, dist) return logger.debug("found '%s' for '%s'.", filename, dist) return obj
python
def read_dist_egginfo_json(dist, filename=DEFAULT_JSON): """ Safely get a json within an egginfo from a distribution. """ # use the given package's distribution to acquire the json file. if not dist.has_metadata(filename): logger.debug("no '%s' for '%s'", filename, dist) return try: result = dist.get_metadata(filename) except IOError: logger.error("I/O error on reading of '%s' for '%s'.", filename, dist) return try: obj = json.loads(result) except (TypeError, ValueError): logger.error( "the '%s' found in '%s' is not a valid json.", filename, dist) return logger.debug("found '%s' for '%s'.", filename, dist) return obj
[ "def", "read_dist_egginfo_json", "(", "dist", ",", "filename", "=", "DEFAULT_JSON", ")", ":", "# use the given package's distribution to acquire the json file.", "if", "not", "dist", ".", "has_metadata", "(", "filename", ")", ":", "logger", ".", "debug", "(", "\"no '%...
Safely get a json within an egginfo from a distribution.
[ "Safely", "get", "a", "json", "within", "an", "egginfo", "from", "a", "distribution", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L192-L216
train
45,768
calmjs/calmjs
src/calmjs/dist.py
read_egginfo_json
def read_egginfo_json(pkg_name, filename=DEFAULT_JSON, working_set=None): """ Read json from egginfo of a package identified by `pkg_name` that's already installed within the current Python environment. """ working_set = working_set or default_working_set dist = find_pkg_dist(pkg_name, working_set=working_set) return read_dist_egginfo_json(dist, filename)
python
def read_egginfo_json(pkg_name, filename=DEFAULT_JSON, working_set=None): """ Read json from egginfo of a package identified by `pkg_name` that's already installed within the current Python environment. """ working_set = working_set or default_working_set dist = find_pkg_dist(pkg_name, working_set=working_set) return read_dist_egginfo_json(dist, filename)
[ "def", "read_egginfo_json", "(", "pkg_name", ",", "filename", "=", "DEFAULT_JSON", ",", "working_set", "=", "None", ")", ":", "working_set", "=", "working_set", "or", "default_working_set", "dist", "=", "find_pkg_dist", "(", "pkg_name", ",", "working_set", "=", ...
Read json from egginfo of a package identified by `pkg_name` that's already installed within the current Python environment.
[ "Read", "json", "from", "egginfo", "of", "a", "package", "identified", "by", "pkg_name", "that", "s", "already", "installed", "within", "the", "current", "Python", "environment", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L219-L227
train
45,769
calmjs/calmjs
src/calmjs/dist.py
flatten_dist_egginfo_json
def flatten_dist_egginfo_json( source_dists, filename=DEFAULT_JSON, dep_keys=DEP_KEYS, working_set=None): """ Flatten a distribution's egginfo json, with the depended keys to be flattened. Originally this was done for this: Resolve a distribution's (dev)dependencies through the working set and generate a flattened version package.json, returned as a dict, from the resolved distributions. Default working set is the one from pkg_resources. The generated package.json dict is done by grabbing all package.json metadata from all parent Python packages, starting from the highest level and down to the lowest. The current distribution's dependencies will be layered on top along with its other package information. This has the effect of child packages overriding node/npm dependencies which is by the design of this function. If nested dependencies are desired, just rely on npm only for all dependency management. Flat is better than nested. """ working_set = working_set or default_working_set obj = {} # TODO figure out the best way to explicitly report back to caller # how the keys came to be (from which dist). Perhaps create a # detailed function based on this, retain this one to return the # distilled results. depends = {dep: {} for dep in dep_keys} # Go from the earliest package down to the latest one, as we will # flatten children's d(evD)ependencies on top of parent's. for dist in source_dists: obj = read_dist_egginfo_json(dist, filename) if not obj: continue logger.debug("merging '%s' for required '%s'", filename, dist) for dep in dep_keys: depends[dep].update(obj.get(dep, {})) if obj is None: # top level object does not have egg-info defined return depends for dep in dep_keys: # filtering out all the nulls. obj[dep] = {k: v for k, v in depends[dep].items() if v is not None} return obj
python
def flatten_dist_egginfo_json( source_dists, filename=DEFAULT_JSON, dep_keys=DEP_KEYS, working_set=None): """ Flatten a distribution's egginfo json, with the depended keys to be flattened. Originally this was done for this: Resolve a distribution's (dev)dependencies through the working set and generate a flattened version package.json, returned as a dict, from the resolved distributions. Default working set is the one from pkg_resources. The generated package.json dict is done by grabbing all package.json metadata from all parent Python packages, starting from the highest level and down to the lowest. The current distribution's dependencies will be layered on top along with its other package information. This has the effect of child packages overriding node/npm dependencies which is by the design of this function. If nested dependencies are desired, just rely on npm only for all dependency management. Flat is better than nested. """ working_set = working_set or default_working_set obj = {} # TODO figure out the best way to explicitly report back to caller # how the keys came to be (from which dist). Perhaps create a # detailed function based on this, retain this one to return the # distilled results. depends = {dep: {} for dep in dep_keys} # Go from the earliest package down to the latest one, as we will # flatten children's d(evD)ependencies on top of parent's. for dist in source_dists: obj = read_dist_egginfo_json(dist, filename) if not obj: continue logger.debug("merging '%s' for required '%s'", filename, dist) for dep in dep_keys: depends[dep].update(obj.get(dep, {})) if obj is None: # top level object does not have egg-info defined return depends for dep in dep_keys: # filtering out all the nulls. obj[dep] = {k: v for k, v in depends[dep].items() if v is not None} return obj
[ "def", "flatten_dist_egginfo_json", "(", "source_dists", ",", "filename", "=", "DEFAULT_JSON", ",", "dep_keys", "=", "DEP_KEYS", ",", "working_set", "=", "None", ")", ":", "working_set", "=", "working_set", "or", "default_working_set", "obj", "=", "{", "}", "# T...
Flatten a distribution's egginfo json, with the depended keys to be flattened. Originally this was done for this: Resolve a distribution's (dev)dependencies through the working set and generate a flattened version package.json, returned as a dict, from the resolved distributions. Default working set is the one from pkg_resources. The generated package.json dict is done by grabbing all package.json metadata from all parent Python packages, starting from the highest level and down to the lowest. The current distribution's dependencies will be layered on top along with its other package information. This has the effect of child packages overriding node/npm dependencies which is by the design of this function. If nested dependencies are desired, just rely on npm only for all dependency management. Flat is better than nested.
[ "Flatten", "a", "distribution", "s", "egginfo", "json", "with", "the", "depended", "keys", "to", "be", "flattened", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L245-L301
train
45,770
calmjs/calmjs
src/calmjs/dist.py
flatten_egginfo_json
def flatten_egginfo_json( pkg_names, filename=DEFAULT_JSON, dep_keys=DEP_KEYS, working_set=None): """ A shorthand calling convention where the package name is supplied instead of a distribution. Originally written for this: Generate a flattened package.json with packages `pkg_names` that are already installed within the current Python environment (defaults to the current global working_set which should have been set up correctly by pkg_resources). """ working_set = working_set or default_working_set # Ensure only grabbing packages that exists in working_set dists = find_packages_requirements_dists( pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=filename, dep_keys=dep_keys, working_set=working_set)
python
def flatten_egginfo_json( pkg_names, filename=DEFAULT_JSON, dep_keys=DEP_KEYS, working_set=None): """ A shorthand calling convention where the package name is supplied instead of a distribution. Originally written for this: Generate a flattened package.json with packages `pkg_names` that are already installed within the current Python environment (defaults to the current global working_set which should have been set up correctly by pkg_resources). """ working_set = working_set or default_working_set # Ensure only grabbing packages that exists in working_set dists = find_packages_requirements_dists( pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=filename, dep_keys=dep_keys, working_set=working_set)
[ "def", "flatten_egginfo_json", "(", "pkg_names", ",", "filename", "=", "DEFAULT_JSON", ",", "dep_keys", "=", "DEP_KEYS", ",", "working_set", "=", "None", ")", ":", "working_set", "=", "working_set", "or", "default_working_set", "# Ensure only grabbing packages that exis...
A shorthand calling convention where the package name is supplied instead of a distribution. Originally written for this: Generate a flattened package.json with packages `pkg_names` that are already installed within the current Python environment (defaults to the current global working_set which should have been set up correctly by pkg_resources).
[ "A", "shorthand", "calling", "convention", "where", "the", "package", "name", "is", "supplied", "instead", "of", "a", "distribution", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L304-L323
train
45,771
calmjs/calmjs
src/calmjs/dist.py
build_helpers_egginfo_json
def build_helpers_egginfo_json( json_field, json_key_registry, json_filename=None): """ Return a tuple of functions that will provide the usage of the JSON egginfo based around the provided field. """ json_filename = ( json_field + '.json' if json_filename is None else json_filename) # Default calmjs core implementation specific functions, to be used by # integrators intended to use this as a distribution. def get_extras_json(pkg_names, working_set=None): """ Only extract the extras_json information for the given packages 'pkg_names'. """ working_set = working_set or default_working_set dep_keys = set(get(json_key_registry).iter_records()) dists = pkg_names_to_dists(pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=json_filename, dep_keys=dep_keys, working_set=working_set ) def _flatten_extras_json(pkg_names, find_dists, working_set): # registry key must be explicit here as it was designed for this. dep_keys = set(get(json_key_registry).iter_records()) dists = find_dists(pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=json_filename, dep_keys=dep_keys, working_set=working_set ) def flatten_extras_json(pkg_names, working_set=None): """ Traverses through the dependency graph of packages 'pkg_names' and flattens all the egg_info json information """ working_set = working_set or default_working_set return _flatten_extras_json( pkg_names, find_packages_requirements_dists, working_set) def flatten_parents_extras_json(pkg_names, working_set=None): """ Traverses through the dependency graph of packages 'pkg_names' and flattens all the egg_info json information for parents of the specified packages. """ working_set = working_set or default_working_set return _flatten_extras_json( pkg_names, find_packages_parents_requirements_dists, working_set) write_extras_json = partial(write_json_file, json_field) return ( get_extras_json, flatten_extras_json, flatten_parents_extras_json, write_extras_json, )
python
def build_helpers_egginfo_json( json_field, json_key_registry, json_filename=None): """ Return a tuple of functions that will provide the usage of the JSON egginfo based around the provided field. """ json_filename = ( json_field + '.json' if json_filename is None else json_filename) # Default calmjs core implementation specific functions, to be used by # integrators intended to use this as a distribution. def get_extras_json(pkg_names, working_set=None): """ Only extract the extras_json information for the given packages 'pkg_names'. """ working_set = working_set or default_working_set dep_keys = set(get(json_key_registry).iter_records()) dists = pkg_names_to_dists(pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=json_filename, dep_keys=dep_keys, working_set=working_set ) def _flatten_extras_json(pkg_names, find_dists, working_set): # registry key must be explicit here as it was designed for this. dep_keys = set(get(json_key_registry).iter_records()) dists = find_dists(pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=json_filename, dep_keys=dep_keys, working_set=working_set ) def flatten_extras_json(pkg_names, working_set=None): """ Traverses through the dependency graph of packages 'pkg_names' and flattens all the egg_info json information """ working_set = working_set or default_working_set return _flatten_extras_json( pkg_names, find_packages_requirements_dists, working_set) def flatten_parents_extras_json(pkg_names, working_set=None): """ Traverses through the dependency graph of packages 'pkg_names' and flattens all the egg_info json information for parents of the specified packages. """ working_set = working_set or default_working_set return _flatten_extras_json( pkg_names, find_packages_parents_requirements_dists, working_set) write_extras_json = partial(write_json_file, json_field) return ( get_extras_json, flatten_extras_json, flatten_parents_extras_json, write_extras_json, )
[ "def", "build_helpers_egginfo_json", "(", "json_field", ",", "json_key_registry", ",", "json_filename", "=", "None", ")", ":", "json_filename", "=", "(", "json_field", "+", "'.json'", "if", "json_filename", "is", "None", "else", "json_filename", ")", "# Default calm...
Return a tuple of functions that will provide the usage of the JSON egginfo based around the provided field.
[ "Return", "a", "tuple", "of", "functions", "that", "will", "provide", "the", "usage", "of", "the", "JSON", "egginfo", "based", "around", "the", "provided", "field", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L326-L390
train
45,772
calmjs/calmjs
src/calmjs/dist.py
build_helpers_module_registry_dependencies
def build_helpers_module_registry_dependencies(registry_name='calmjs.module'): """ Return a tuple of funtions that will provide the functions that return the relevant sets of module registry records based on the dependencies defined for the provided packages. """ def get_module_registry_dependencies( pkg_names, registry_name=registry_name, working_set=None): """ Get dependencies for the given package names from module registry identified by registry name. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve the exported location for just the package. """ working_set = working_set or default_working_set registry = get(registry_name) if not isinstance(registry, BaseModuleRegistry): return {} result = {} for pkg_name in pkg_names: result.update(registry.get_records_for_package(pkg_name)) return result def _flatten_module_registry_dependencies( pkg_names, registry_name, find_dists, working_set): """ Flatten dependencies for the given package names from module registry identified by registry name using the find_dists function on the given working_set. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve and flatten all the exported locations. """ result = {} registry = get(registry_name) if not isinstance(registry, BaseModuleRegistry): return result dists = find_dists(pkg_names, working_set=working_set) for dist in dists: result.update(registry.get_records_for_package(dist.project_name)) return result def flatten_module_registry_dependencies( pkg_names, registry_name=registry_name, working_set=None): """ Flatten dependencies for the specified packages from the module registry identified by registry name. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve and flatten all the exported locations. 
""" working_set = working_set or default_working_set return _flatten_module_registry_dependencies( pkg_names, registry_name, find_packages_requirements_dists, working_set) def flatten_parents_module_registry_dependencies( pkg_names, registry_name=registry_name, working_set=None): """ Flatten dependencies for the parents of the specified packages from the module registry identified by registry name. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve and flatten all the exported locations. """ working_set = working_set or default_working_set return _flatten_module_registry_dependencies( pkg_names, registry_name, find_packages_parents_requirements_dists, working_set) return ( get_module_registry_dependencies, flatten_module_registry_dependencies, flatten_parents_module_registry_dependencies, )
python
def build_helpers_module_registry_dependencies(registry_name='calmjs.module'): """ Return a tuple of funtions that will provide the functions that return the relevant sets of module registry records based on the dependencies defined for the provided packages. """ def get_module_registry_dependencies( pkg_names, registry_name=registry_name, working_set=None): """ Get dependencies for the given package names from module registry identified by registry name. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve the exported location for just the package. """ working_set = working_set or default_working_set registry = get(registry_name) if not isinstance(registry, BaseModuleRegistry): return {} result = {} for pkg_name in pkg_names: result.update(registry.get_records_for_package(pkg_name)) return result def _flatten_module_registry_dependencies( pkg_names, registry_name, find_dists, working_set): """ Flatten dependencies for the given package names from module registry identified by registry name using the find_dists function on the given working_set. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve and flatten all the exported locations. """ result = {} registry = get(registry_name) if not isinstance(registry, BaseModuleRegistry): return result dists = find_dists(pkg_names, working_set=working_set) for dist in dists: result.update(registry.get_records_for_package(dist.project_name)) return result def flatten_module_registry_dependencies( pkg_names, registry_name=registry_name, working_set=None): """ Flatten dependencies for the specified packages from the module registry identified by registry name. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve and flatten all the exported locations. 
""" working_set = working_set or default_working_set return _flatten_module_registry_dependencies( pkg_names, registry_name, find_packages_requirements_dists, working_set) def flatten_parents_module_registry_dependencies( pkg_names, registry_name=registry_name, working_set=None): """ Flatten dependencies for the parents of the specified packages from the module registry identified by registry name. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve and flatten all the exported locations. """ working_set = working_set or default_working_set return _flatten_module_registry_dependencies( pkg_names, registry_name, find_packages_parents_requirements_dists, working_set) return ( get_module_registry_dependencies, flatten_module_registry_dependencies, flatten_parents_module_registry_dependencies, )
[ "def", "build_helpers_module_registry_dependencies", "(", "registry_name", "=", "'calmjs.module'", ")", ":", "def", "get_module_registry_dependencies", "(", "pkg_names", ",", "registry_name", "=", "registry_name", ",", "working_set", "=", "None", ")", ":", "\"\"\"\n ...
Return a tuple of funtions that will provide the functions that return the relevant sets of module registry records based on the dependencies defined for the provided packages.
[ "Return", "a", "tuple", "of", "funtions", "that", "will", "provide", "the", "functions", "that", "return", "the", "relevant", "sets", "of", "module", "registry", "records", "based", "on", "the", "dependencies", "defined", "for", "the", "provided", "packages", ...
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L398-L484
train
45,773
calmjs/calmjs
src/calmjs/dist.py
has_calmjs_artifact_declarations
def has_calmjs_artifact_declarations(cmd, registry_name='calmjs.artifacts'): """ For a distutils command to verify that the artifact build step is possible. """ return any(get(registry_name).iter_records_for( cmd.distribution.get_name()))
python
def has_calmjs_artifact_declarations(cmd, registry_name='calmjs.artifacts'): """ For a distutils command to verify that the artifact build step is possible. """ return any(get(registry_name).iter_records_for( cmd.distribution.get_name()))
[ "def", "has_calmjs_artifact_declarations", "(", "cmd", ",", "registry_name", "=", "'calmjs.artifacts'", ")", ":", "return", "any", "(", "get", "(", "registry_name", ")", ".", "iter_records_for", "(", "cmd", ".", "distribution", ".", "get_name", "(", ")", ")", ...
For a distutils command to verify that the artifact build step is possible.
[ "For", "a", "distutils", "command", "to", "verify", "that", "the", "artifact", "build", "step", "is", "possible", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L551-L558
train
45,774
calmjs/calmjs
src/calmjs/dist.py
build_calmjs_artifacts
def build_calmjs_artifacts(dist, key, value, cmdclass=BuildCommand): """ Trigger the artifact build process through the setuptools. """ if value is not True: return build_cmd = dist.get_command_obj('build') if not isinstance(build_cmd, cmdclass): logger.error( "'build' command in Distribution is not an instance of " "'%s:%s' (got %r instead)", cmdclass.__module__, cmdclass.__name__, build_cmd) return build_cmd.sub_commands.append((key, has_calmjs_artifact_declarations))
python
def build_calmjs_artifacts(dist, key, value, cmdclass=BuildCommand): """ Trigger the artifact build process through the setuptools. """ if value is not True: return build_cmd = dist.get_command_obj('build') if not isinstance(build_cmd, cmdclass): logger.error( "'build' command in Distribution is not an instance of " "'%s:%s' (got %r instead)", cmdclass.__module__, cmdclass.__name__, build_cmd) return build_cmd.sub_commands.append((key, has_calmjs_artifact_declarations))
[ "def", "build_calmjs_artifacts", "(", "dist", ",", "key", ",", "value", ",", "cmdclass", "=", "BuildCommand", ")", ":", "if", "value", "is", "not", "True", ":", "return", "build_cmd", "=", "dist", ".", "get_command_obj", "(", "'build'", ")", "if", "not", ...
Trigger the artifact build process through the setuptools.
[ "Trigger", "the", "artifact", "build", "process", "through", "the", "setuptools", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L561-L577
train
45,775
weso/CWR-DataApi
cwr/grammar/factory/rule.py
FieldRuleFactory.get_rule
def get_rule(self, field_id): """ Returns the rule for the field identified by the id. If it is set as not being compulsory, the rule will be adapted to accept string composed only of white characters. :param field_id: unique id in the system for the field :return: the rule of a field """ if field_id in self._fields: # Field already exists field = self._fields[field_id] else: # Field does not exist # It is created field = self._create_field(field_id) # Field is saved self._fields[field_id] = field return field
python
def get_rule(self, field_id): """ Returns the rule for the field identified by the id. If it is set as not being compulsory, the rule will be adapted to accept string composed only of white characters. :param field_id: unique id in the system for the field :return: the rule of a field """ if field_id in self._fields: # Field already exists field = self._fields[field_id] else: # Field does not exist # It is created field = self._create_field(field_id) # Field is saved self._fields[field_id] = field return field
[ "def", "get_rule", "(", "self", ",", "field_id", ")", ":", "if", "field_id", "in", "self", ".", "_fields", ":", "# Field already exists", "field", "=", "self", ".", "_fields", "[", "field_id", "]", "else", ":", "# Field does not exist", "# It is created", "fie...
Returns the rule for the field identified by the id. If it is set as not being compulsory, the rule will be adapted to accept string composed only of white characters. :param field_id: unique id in the system for the field :return: the rule of a field
[ "Returns", "the", "rule", "for", "the", "field", "identified", "by", "the", "id", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/factory/rule.py#L41-L63
train
45,776
weso/CWR-DataApi
cwr/grammar/factory/rule.py
FieldRuleFactory._create_field
def _create_field(self, field_id): """ Creates the field with the specified parameters. :param field_id: identifier for the field :return: the basic rule for the field """ # Field configuration info config = self._field_configs[field_id] adapter = self._adapters[config['type']] if 'name' in config: name = config['name'] else: name = None if 'size' in config: columns = config['size'] else: columns = None if 'values' in config: values = config['values'] else: values = None field = adapter.get_field(name, columns, values) if 'results_name' in config: field = field.setResultsName(config['results_name']) else: field = field.setResultsName(field_id) return field
python
def _create_field(self, field_id): """ Creates the field with the specified parameters. :param field_id: identifier for the field :return: the basic rule for the field """ # Field configuration info config = self._field_configs[field_id] adapter = self._adapters[config['type']] if 'name' in config: name = config['name'] else: name = None if 'size' in config: columns = config['size'] else: columns = None if 'values' in config: values = config['values'] else: values = None field = adapter.get_field(name, columns, values) if 'results_name' in config: field = field.setResultsName(config['results_name']) else: field = field.setResultsName(field_id) return field
[ "def", "_create_field", "(", "self", ",", "field_id", ")", ":", "# Field configuration info", "config", "=", "self", ".", "_field_configs", "[", "field_id", "]", "adapter", "=", "self", ".", "_adapters", "[", "config", "[", "'type'", "]", "]", "if", "'name'"...
Creates the field with the specified parameters. :param field_id: identifier for the field :return: the basic rule for the field
[ "Creates", "the", "field", "with", "the", "specified", "parameters", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/factory/rule.py#L65-L99
train
45,777
weso/CWR-DataApi
data_cwr/accessor.py
_FileReader.read_csv_file
def read_csv_file(self, file_name): """ Parses a CSV file into a list. :param file_name: name of the CSV file :return: a list with the file's contents """ result = [] with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as csvfile: headers_reader = csv.reader(csvfile, delimiter=',', quotechar='|') for type_row in headers_reader: for t in type_row: result.append(t) return result
python
def read_csv_file(self, file_name): """ Parses a CSV file into a list. :param file_name: name of the CSV file :return: a list with the file's contents """ result = [] with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as csvfile: headers_reader = csv.reader(csvfile, delimiter=',', quotechar='|') for type_row in headers_reader: for t in type_row: result.append(t) return result
[ "def", "read_csv_file", "(", "self", ",", "file_name", ")", ":", "result", "=", "[", "]", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__path", "(", ")", ",", "os", ".", "path", ".", "basename", "(", "file_name", ")", "...
Parses a CSV file into a list. :param file_name: name of the CSV file :return: a list with the file's contents
[ "Parses", "a", "CSV", "file", "into", "a", "list", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/data_cwr/accessor.py#L39-L53
train
45,778
weso/CWR-DataApi
data_cwr/accessor.py
_FileReader.read_yaml_file
def read_yaml_file(self, file_name): """ Parses a YAML file into a matrix. :param file_name: name of the YAML file :return: a matrix with the file's contents """ with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as yamlfile: return yaml.load(yamlfile)
python
def read_yaml_file(self, file_name): """ Parses a YAML file into a matrix. :param file_name: name of the YAML file :return: a matrix with the file's contents """ with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as yamlfile: return yaml.load(yamlfile)
[ "def", "read_yaml_file", "(", "self", ",", "file_name", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__path", "(", ")", ",", "os", ".", "path", ".", "basename", "(", "file_name", ")", ")", ",", "'rt'", ")", "a...
Parses a YAML file into a matrix. :param file_name: name of the YAML file :return: a matrix with the file's contents
[ "Parses", "a", "YAML", "file", "into", "a", "matrix", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/data_cwr/accessor.py#L55-L64
train
45,779
weso/CWR-DataApi
data_cwr/accessor.py
CWRTables.get_data
def get_data(self, file_id): """ Acquires the data from the table identified by the id. The file is read only once, consecutive calls to this method will return the sale collection. :param file_id: identifier for the table :return: all the values from the table """ if file_id not in self._file_values: file_contents = 'cwr_%s.csv' % file_id self._file_values[file_id] = self._reader.read_csv_file( file_contents) return self._file_values[file_id]
python
def get_data(self, file_id): """ Acquires the data from the table identified by the id. The file is read only once, consecutive calls to this method will return the sale collection. :param file_id: identifier for the table :return: all the values from the table """ if file_id not in self._file_values: file_contents = 'cwr_%s.csv' % file_id self._file_values[file_id] = self._reader.read_csv_file( file_contents) return self._file_values[file_id]
[ "def", "get_data", "(", "self", ",", "file_id", ")", ":", "if", "file_id", "not", "in", "self", ".", "_file_values", ":", "file_contents", "=", "'cwr_%s.csv'", "%", "file_id", "self", ".", "_file_values", "[", "file_id", "]", "=", "self", ".", "_reader", ...
Acquires the data from the table identified by the id. The file is read only once, consecutive calls to this method will return the sale collection. :param file_id: identifier for the table :return: all the values from the table
[ "Acquires", "the", "data", "from", "the", "table", "identified", "by", "the", "id", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/data_cwr/accessor.py#L82-L97
train
45,780
weso/CWR-DataApi
cwr/grammar/field/record.py
record_type
def record_type(values): """ Creates a record type field. These serve as the header field on records, identifying them. Usually this field can be only an specific value, but sometimes a small range of codes is allowed. This is specified by the 'values' parameter. While it is possible to set this field as optional, it is expected to be compulsory. :param values: allowed record type codes :return: grammar for the record type field """ field = basic.lookup(values, name='Record Type (one of %s)' % values) return field.setResultsName('record_type')
python
def record_type(values): """ Creates a record type field. These serve as the header field on records, identifying them. Usually this field can be only an specific value, but sometimes a small range of codes is allowed. This is specified by the 'values' parameter. While it is possible to set this field as optional, it is expected to be compulsory. :param values: allowed record type codes :return: grammar for the record type field """ field = basic.lookup(values, name='Record Type (one of %s)' % values) return field.setResultsName('record_type')
[ "def", "record_type", "(", "values", ")", ":", "field", "=", "basic", ".", "lookup", "(", "values", ",", "name", "=", "'Record Type (one of %s)'", "%", "values", ")", "return", "field", ".", "setResultsName", "(", "'record_type'", ")" ]
Creates a record type field. These serve as the header field on records, identifying them. Usually this field can be only an specific value, but sometimes a small range of codes is allowed. This is specified by the 'values' parameter. While it is possible to set this field as optional, it is expected to be compulsory. :param values: allowed record type codes :return: grammar for the record type field
[ "Creates", "a", "record", "type", "field", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/record.py#L25-L42
train
45,781
weso/CWR-DataApi
cwr/grammar/field/record.py
record_prefix
def record_prefix(required_type, factory): """ Creates a record prefix for the specified record type. :param required_type: the type of the record using this prefix :param factory: field factory :return: the record prefix """ field = record_type(required_type) field += factory.get_rule('transaction_sequence_n') field += factory.get_rule('record_sequence_n') # field.leaveWhitespace() return field
python
def record_prefix(required_type, factory): """ Creates a record prefix for the specified record type. :param required_type: the type of the record using this prefix :param factory: field factory :return: the record prefix """ field = record_type(required_type) field += factory.get_rule('transaction_sequence_n') field += factory.get_rule('record_sequence_n') # field.leaveWhitespace() return field
[ "def", "record_prefix", "(", "required_type", ",", "factory", ")", ":", "field", "=", "record_type", "(", "required_type", ")", "field", "+=", "factory", ".", "get_rule", "(", "'transaction_sequence_n'", ")", "field", "+=", "factory", ".", "get_rule", "(", "'r...
Creates a record prefix for the specified record type. :param required_type: the type of the record using this prefix :param factory: field factory :return: the record prefix
[ "Creates", "a", "record", "prefix", "for", "the", "specified", "record", "type", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/record.py#L46-L60
train
45,782
weso/CWR-DataApi
config_cwr/accessor.py
_FileReader.read_config_file
def read_config_file(self, file_name): """ Reads a CWR grammar config file. :param file_name: name of the text file :return: the file's contents """ with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as file_config: return self._parser.parseString(file_config.read())
python
def read_config_file(self, file_name): """ Reads a CWR grammar config file. :param file_name: name of the text file :return: the file's contents """ with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as file_config: return self._parser.parseString(file_config.read())
[ "def", "read_config_file", "(", "self", ",", "file_name", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__path", "(", ")", ",", "os", ".", "path", ".", "basename", "(", "file_name", ")", ")", ",", "'rt'", ")", ...
Reads a CWR grammar config file. :param file_name: name of the text file :return: the file's contents
[ "Reads", "a", "CWR", "grammar", "config", "file", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/config_cwr/accessor.py#L48-L57
train
45,783
weso/CWR-DataApi
config_cwr/accessor.py
CWRConfiguration._load_cwr_defaults
def _load_cwr_defaults(self): """ Loads the CWR default values file, creating a matrix from it, and then returns this data. The file will only be loaded once. :return: the CWR default values matrix """ if self._cwr_defaults is None: self._cwr_defaults = self._reader.read_yaml_file( self._file_defaults) return self._cwr_defaults
python
def _load_cwr_defaults(self): """ Loads the CWR default values file, creating a matrix from it, and then returns this data. The file will only be loaded once. :return: the CWR default values matrix """ if self._cwr_defaults is None: self._cwr_defaults = self._reader.read_yaml_file( self._file_defaults) return self._cwr_defaults
[ "def", "_load_cwr_defaults", "(", "self", ")", ":", "if", "self", ".", "_cwr_defaults", "is", "None", ":", "self", ".", "_cwr_defaults", "=", "self", ".", "_reader", ".", "read_yaml_file", "(", "self", ".", "_file_defaults", ")", "return", "self", ".", "_c...
Loads the CWR default values file, creating a matrix from it, and then returns this data. The file will only be loaded once. :return: the CWR default values matrix
[ "Loads", "the", "CWR", "default", "values", "file", "creating", "a", "matrix", "from", "it", "and", "then", "returns", "this", "data", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/config_cwr/accessor.py#L82-L95
train
45,784
calmjs/calmjs
src/calmjs/argparse.py
ArgumentParser.soft_error
def soft_error(self, message): """ Same as error, without the dying in a fire part. """ self.print_usage(sys.stderr) args = {'prog': self.prog, 'message': message} self._print_message( _('%(prog)s: error: %(message)s\n') % args, sys.stderr)
python
def soft_error(self, message): """ Same as error, without the dying in a fire part. """ self.print_usage(sys.stderr) args = {'prog': self.prog, 'message': message} self._print_message( _('%(prog)s: error: %(message)s\n') % args, sys.stderr)
[ "def", "soft_error", "(", "self", ",", "message", ")", ":", "self", ".", "print_usage", "(", "sys", ".", "stderr", ")", "args", "=", "{", "'prog'", ":", "self", ".", "prog", ",", "'message'", ":", "message", "}", "self", ".", "_print_message", "(", "...
Same as error, without the dying in a fire part.
[ "Same", "as", "error", "without", "the", "dying", "in", "a", "fire", "part", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/argparse.py#L262-L270
train
45,785
weso/CWR-DataApi
cwr/parser/decoder/file.py
default_filename_decoder
def default_filename_decoder(): """ Creates a decoder which parses CWR filenames following the old or the new convention. :return: a CWR filename decoder for the old and the new conventions """ factory = default_filename_grammar_factory() grammar_old = factory.get_rule('filename_old') grammar_new = factory.get_rule('filename_new') return FileNameDecoder(grammar_old, grammar_new)
python
def default_filename_decoder(): """ Creates a decoder which parses CWR filenames following the old or the new convention. :return: a CWR filename decoder for the old and the new conventions """ factory = default_filename_grammar_factory() grammar_old = factory.get_rule('filename_old') grammar_new = factory.get_rule('filename_new') return FileNameDecoder(grammar_old, grammar_new)
[ "def", "default_filename_decoder", "(", ")", ":", "factory", "=", "default_filename_grammar_factory", "(", ")", "grammar_old", "=", "factory", ".", "get_rule", "(", "'filename_old'", ")", "grammar_new", "=", "factory", ".", "get_rule", "(", "'filename_new'", ")", ...
Creates a decoder which parses CWR filenames following the old or the new convention. :return: a CWR filename decoder for the old and the new conventions
[ "Creates", "a", "decoder", "which", "parses", "CWR", "filenames", "following", "the", "old", "or", "the", "new", "convention", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/parser/decoder/file.py#L252-L264
train
45,786
weso/CWR-DataApi
cwr/parser/decoder/file.py
FileDecoder.decode
def decode(self, data): """ Parses the file, creating a CWRFile from it. It requires a dictionary with two values: - filename, containing the filename - contents, containing the file contents :param data: dictionary with the data to parse :return: a CWRFile instance """ file_name = self._filename_decoder.decode(data['filename']) file_data = data['contents'] i = 0 max_size = len(file_data) while file_data[i:i + 1] != 'H' and i < max_size: i += 1 if i > 0: data['contents'] = file_data[i:] transmission = self._file_decoder.decode(data['contents'])[0] return CWRFile(file_name, transmission)
python
def decode(self, data): """ Parses the file, creating a CWRFile from it. It requires a dictionary with two values: - filename, containing the filename - contents, containing the file contents :param data: dictionary with the data to parse :return: a CWRFile instance """ file_name = self._filename_decoder.decode(data['filename']) file_data = data['contents'] i = 0 max_size = len(file_data) while file_data[i:i + 1] != 'H' and i < max_size: i += 1 if i > 0: data['contents'] = file_data[i:] transmission = self._file_decoder.decode(data['contents'])[0] return CWRFile(file_name, transmission)
[ "def", "decode", "(", "self", ",", "data", ")", ":", "file_name", "=", "self", ".", "_filename_decoder", ".", "decode", "(", "data", "[", "'filename'", "]", ")", "file_data", "=", "data", "[", "'contents'", "]", "i", "=", "0", "max_size", "=", "len", ...
Parses the file, creating a CWRFile from it. It requires a dictionary with two values: - filename, containing the filename - contents, containing the file contents :param data: dictionary with the data to parse :return: a CWRFile instance
[ "Parses", "the", "file", "creating", "a", "CWRFile", "from", "it", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/parser/decoder/file.py#L287-L310
train
45,787
weso/CWR-DataApi
cwr/parser/decoder/file.py
FileNameDecoder.decode
def decode(self, file_name): """ Parses the filename, creating a FileTag from it. It will try both the old and the new conventions, if the filename does not conform any of them, then an empty FileTag will be returned. :param file_name: filename to parse :return: a FileTag instance """ try: file_tag = self._filename_decoder_new.decode(file_name) except: try: file_tag = self._filename_decoder_old.decode(file_name) except: file_tag = FileTag(0, 0, '', '', '') return file_tag
python
def decode(self, file_name): """ Parses the filename, creating a FileTag from it. It will try both the old and the new conventions, if the filename does not conform any of them, then an empty FileTag will be returned. :param file_name: filename to parse :return: a FileTag instance """ try: file_tag = self._filename_decoder_new.decode(file_name) except: try: file_tag = self._filename_decoder_old.decode(file_name) except: file_tag = FileTag(0, 0, '', '', '') return file_tag
[ "def", "decode", "(", "self", ",", "file_name", ")", ":", "try", ":", "file_tag", "=", "self", ".", "_filename_decoder_new", ".", "decode", "(", "file_name", ")", "except", ":", "try", ":", "file_tag", "=", "self", ".", "_filename_decoder_old", ".", "decod...
Parses the filename, creating a FileTag from it. It will try both the old and the new conventions, if the filename does not conform any of them, then an empty FileTag will be returned. :param file_name: filename to parse :return: a FileTag instance
[ "Parses", "the", "filename", "creating", "a", "FileTag", "from", "it", "." ]
f3b6ba8308c901b6ab87073c155c08e30692333c
https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/parser/decoder/file.py#L329-L347
train
45,788
calmjs/calmjs
src/calmjs/utils.py
enable_pretty_logging
def enable_pretty_logging(logger='calmjs', level=logging.DEBUG, stream=None): """ Shorthand to enable pretty logging """ def cleanup(): logger.removeHandler(handler) logger.level = old_level if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) old_level = logger.level handler = logging.StreamHandler(stream) handler.setFormatter(logging.Formatter( u'%(asctime)s %(levelname)s %(name)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) return cleanup
python
def enable_pretty_logging(logger='calmjs', level=logging.DEBUG, stream=None): """ Shorthand to enable pretty logging """ def cleanup(): logger.removeHandler(handler) logger.level = old_level if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) old_level = logger.level handler = logging.StreamHandler(stream) handler.setFormatter(logging.Formatter( u'%(asctime)s %(levelname)s %(name)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) return cleanup
[ "def", "enable_pretty_logging", "(", "logger", "=", "'calmjs'", ",", "level", "=", "logging", ".", "DEBUG", ",", "stream", "=", "None", ")", ":", "def", "cleanup", "(", ")", ":", "logger", ".", "removeHandler", "(", "handler", ")", "logger", ".", "level"...
Shorthand to enable pretty logging
[ "Shorthand", "to", "enable", "pretty", "logging" ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/utils.py#L53-L71
train
45,789
calmjs/calmjs
src/calmjs/utils.py
finalize_env
def finalize_env(env): """ Produce a platform specific env for passing into subprocess.Popen family of external process calling methods, and the supplied env will be updated on top of it. Returns a new env. """ keys = _PLATFORM_ENV_KEYS.get(sys.platform, []) if 'PATH' not in keys: # this MUST be available due to Node.js (and others really) # needing something to look for binary locations when it shells # out to other binaries. keys.append('PATH') results = { key: os.environ.get(key, '') for key in keys } results.update(env) return results
python
def finalize_env(env): """ Produce a platform specific env for passing into subprocess.Popen family of external process calling methods, and the supplied env will be updated on top of it. Returns a new env. """ keys = _PLATFORM_ENV_KEYS.get(sys.platform, []) if 'PATH' not in keys: # this MUST be available due to Node.js (and others really) # needing something to look for binary locations when it shells # out to other binaries. keys.append('PATH') results = { key: os.environ.get(key, '') for key in keys } results.update(env) return results
[ "def", "finalize_env", "(", "env", ")", ":", "keys", "=", "_PLATFORM_ENV_KEYS", ".", "get", "(", "sys", ".", "platform", ",", "[", "]", ")", "if", "'PATH'", "not", "in", "keys", ":", "# this MUST be available due to Node.js (and others really)", "# needing somethi...
Produce a platform specific env for passing into subprocess.Popen family of external process calling methods, and the supplied env will be updated on top of it. Returns a new env.
[ "Produce", "a", "platform", "specific", "env", "for", "passing", "into", "subprocess", ".", "Popen", "family", "of", "external", "process", "calling", "methods", "and", "the", "supplied", "env", "will", "be", "updated", "on", "top", "of", "it", ".", "Returns...
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/utils.py#L83-L100
train
45,790
calmjs/calmjs
src/calmjs/utils.py
fork_exec
def fork_exec(args, stdin='', **kwargs): """ Do a fork-exec through the subprocess.Popen abstraction in a way that takes a stdin and return stdout. """ as_bytes = isinstance(stdin, bytes) source = stdin if as_bytes else stdin.encode(locale) p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, **kwargs) stdout, stderr = p.communicate(source) if as_bytes: return stdout, stderr return (stdout.decode(locale), stderr.decode(locale))
python
def fork_exec(args, stdin='', **kwargs): """ Do a fork-exec through the subprocess.Popen abstraction in a way that takes a stdin and return stdout. """ as_bytes = isinstance(stdin, bytes) source = stdin if as_bytes else stdin.encode(locale) p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, **kwargs) stdout, stderr = p.communicate(source) if as_bytes: return stdout, stderr return (stdout.decode(locale), stderr.decode(locale))
[ "def", "fork_exec", "(", "args", ",", "stdin", "=", "''", ",", "*", "*", "kwargs", ")", ":", "as_bytes", "=", "isinstance", "(", "stdin", ",", "bytes", ")", "source", "=", "stdin", "if", "as_bytes", "else", "stdin", ".", "encode", "(", "locale", ")",...
Do a fork-exec through the subprocess.Popen abstraction in a way that takes a stdin and return stdout.
[ "Do", "a", "fork", "-", "exec", "through", "the", "subprocess", ".", "Popen", "abstraction", "in", "a", "way", "that", "takes", "a", "stdin", "and", "return", "stdout", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/utils.py#L103-L115
train
45,791
calmjs/calmjs
src/calmjs/utils.py
raise_os_error
def raise_os_error(_errno, path=None): """ Helper for raising the correct exception under Python 3 while still being able to raise the same common exception class in Python 2.7. """ msg = "%s: '%s'" % (strerror(_errno), path) if path else strerror(_errno) raise OSError(_errno, msg)
python
def raise_os_error(_errno, path=None): """ Helper for raising the correct exception under Python 3 while still being able to raise the same common exception class in Python 2.7. """ msg = "%s: '%s'" % (strerror(_errno), path) if path else strerror(_errno) raise OSError(_errno, msg)
[ "def", "raise_os_error", "(", "_errno", ",", "path", "=", "None", ")", ":", "msg", "=", "\"%s: '%s'\"", "%", "(", "strerror", "(", "_errno", ")", ",", "path", ")", "if", "path", "else", "strerror", "(", "_errno", ")", "raise", "OSError", "(", "_errno",...
Helper for raising the correct exception under Python 3 while still being able to raise the same common exception class in Python 2.7.
[ "Helper", "for", "raising", "the", "correct", "exception", "under", "Python", "3", "while", "still", "being", "able", "to", "raise", "the", "same", "common", "exception", "class", "in", "Python", "2", ".", "7", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/utils.py#L118-L125
train
45,792
calmjs/calmjs
src/calmjs/utils.py
which
def which(cmd, mode=os.F_OK | os.X_OK, path=None): """ Given cmd, check where it is on PATH. Loosely based on the version in python 3.3. """ if os.path.dirname(cmd): if os.path.isfile(cmd) and os.access(cmd, mode): return cmd if path is None: path = os.environ.get('PATH', defpath) if not path: return None paths = path.split(pathsep) if sys.platform == 'win32': # oh boy if curdir not in paths: paths = [curdir] + paths # also need to check the fileexts... pathext = os.environ.get('PATHEXT', '').split(pathsep) if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # sanity files = [cmd] seen = set() for p in paths: normpath = normcase(p) if normpath in seen: continue seen.add(normpath) for f in files: fn = os.path.join(p, f) if os.path.isfile(fn) and os.access(fn, mode): return fn return None
python
def which(cmd, mode=os.F_OK | os.X_OK, path=None): """ Given cmd, check where it is on PATH. Loosely based on the version in python 3.3. """ if os.path.dirname(cmd): if os.path.isfile(cmd) and os.access(cmd, mode): return cmd if path is None: path = os.environ.get('PATH', defpath) if not path: return None paths = path.split(pathsep) if sys.platform == 'win32': # oh boy if curdir not in paths: paths = [curdir] + paths # also need to check the fileexts... pathext = os.environ.get('PATHEXT', '').split(pathsep) if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # sanity files = [cmd] seen = set() for p in paths: normpath = normcase(p) if normpath in seen: continue seen.add(normpath) for f in files: fn = os.path.join(p, f) if os.path.isfile(fn) and os.access(fn, mode): return fn return None
[ "def", "which", "(", "cmd", ",", "mode", "=", "os", ".", "F_OK", "|", "os", ".", "X_OK", ",", "path", "=", "None", ")", ":", "if", "os", ".", "path", ".", "dirname", "(", "cmd", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "cmd", ...
Given cmd, check where it is on PATH. Loosely based on the version in python 3.3.
[ "Given", "cmd", "check", "where", "it", "is", "on", "PATH", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/utils.py#L128-L173
train
45,793
calmjs/calmjs
src/calmjs/registry.py
Registry._init
def _init(self): """ Turn the records into actual usable keys. """ self._entry_points = {} for entry_point in self.raw_entry_points: if entry_point.dist.project_name != self.reserved.get( entry_point.name, entry_point.dist.project_name): logger.error( "registry '%s' for '%s' is reserved for package '%s'", entry_point.name, self.registry_name, self.reserved[entry_point.name], ) continue if self.get_record(entry_point.name): logger.warning( "registry '%s' for '%s' is already registered.", entry_point.name, self.registry_name, ) existing = self._entry_points[entry_point.name] logger.debug( "registered '%s' from '%s'", existing, existing.dist) logger.debug( "discarded '%s' from '%s'", entry_point, entry_point.dist) continue logger.debug( "recording '%s' from '%s'", entry_point, entry_point.dist) self._entry_points[entry_point.name] = entry_point
python
def _init(self): """ Turn the records into actual usable keys. """ self._entry_points = {} for entry_point in self.raw_entry_points: if entry_point.dist.project_name != self.reserved.get( entry_point.name, entry_point.dist.project_name): logger.error( "registry '%s' for '%s' is reserved for package '%s'", entry_point.name, self.registry_name, self.reserved[entry_point.name], ) continue if self.get_record(entry_point.name): logger.warning( "registry '%s' for '%s' is already registered.", entry_point.name, self.registry_name, ) existing = self._entry_points[entry_point.name] logger.debug( "registered '%s' from '%s'", existing, existing.dist) logger.debug( "discarded '%s' from '%s'", entry_point, entry_point.dist) continue logger.debug( "recording '%s' from '%s'", entry_point, entry_point.dist) self._entry_points[entry_point.name] = entry_point
[ "def", "_init", "(", "self", ")", ":", "self", ".", "_entry_points", "=", "{", "}", "for", "entry_point", "in", "self", ".", "raw_entry_points", ":", "if", "entry_point", ".", "dist", ".", "project_name", "!=", "self", ".", "reserved", ".", "get", "(", ...
Turn the records into actual usable keys.
[ "Turn", "the", "records", "into", "actual", "usable", "keys", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/registry.py#L66-L96
train
45,794
calmjs/calmjs
src/calmjs/toolchain.py
toolchain_spec_compile_entries
def toolchain_spec_compile_entries( toolchain, spec, entries, process_name, overwrite_log=None): """ The standardized Toolchain Spec Entries compile function This function accepts a toolchain instance, the spec to be operated with and the entries provided for the process name. The standard flow is to deferr the actual processing to the toolchain method `compile_{process_name}_entry` for each entry in the entries list. The generic compile entries method for the compile process. Arguments: toolchain The toolchain to be used for the operation. spec The spec to be operated with. entries The entries for the source. process_name The name of the specific compile process of the provided toolchain. overwrite_log A callable that will accept a 4-tuple of suffix, key, original and new value, if monitoring of overwritten values are required. suffix is derived from the modpath_suffix or targetpath_suffix of the toolchain instance, key is the key on any of the keys on either of those mappings, original and new are the original and the replacement value. """ processor = getattr(toolchain, 'compile_%s_entry' % process_name) modpath_logger = ( partial(overwrite_log, toolchain.modpath_suffix) if callable(overwrite_log) else None) targetpath_logger = ( partial(overwrite_log, toolchain.targetpath_suffix) if callable(overwrite_log) else None) return process_compile_entries( processor, spec, entries, modpath_logger, targetpath_logger)
python
def toolchain_spec_compile_entries( toolchain, spec, entries, process_name, overwrite_log=None): """ The standardized Toolchain Spec Entries compile function This function accepts a toolchain instance, the spec to be operated with and the entries provided for the process name. The standard flow is to deferr the actual processing to the toolchain method `compile_{process_name}_entry` for each entry in the entries list. The generic compile entries method for the compile process. Arguments: toolchain The toolchain to be used for the operation. spec The spec to be operated with. entries The entries for the source. process_name The name of the specific compile process of the provided toolchain. overwrite_log A callable that will accept a 4-tuple of suffix, key, original and new value, if monitoring of overwritten values are required. suffix is derived from the modpath_suffix or targetpath_suffix of the toolchain instance, key is the key on any of the keys on either of those mappings, original and new are the original and the replacement value. """ processor = getattr(toolchain, 'compile_%s_entry' % process_name) modpath_logger = ( partial(overwrite_log, toolchain.modpath_suffix) if callable(overwrite_log) else None) targetpath_logger = ( partial(overwrite_log, toolchain.targetpath_suffix) if callable(overwrite_log) else None) return process_compile_entries( processor, spec, entries, modpath_logger, targetpath_logger)
[ "def", "toolchain_spec_compile_entries", "(", "toolchain", ",", "spec", ",", "entries", ",", "process_name", ",", "overwrite_log", "=", "None", ")", ":", "processor", "=", "getattr", "(", "toolchain", ",", "'compile_%s_entry'", "%", "process_name", ")", "modpath_l...
The standardized Toolchain Spec Entries compile function This function accepts a toolchain instance, the spec to be operated with and the entries provided for the process name. The standard flow is to deferr the actual processing to the toolchain method `compile_{process_name}_entry` for each entry in the entries list. The generic compile entries method for the compile process. Arguments: toolchain The toolchain to be used for the operation. spec The spec to be operated with. entries The entries for the source. process_name The name of the specific compile process of the provided toolchain. overwrite_log A callable that will accept a 4-tuple of suffix, key, original and new value, if monitoring of overwritten values are required. suffix is derived from the modpath_suffix or targetpath_suffix of the toolchain instance, key is the key on any of the keys on either of those mappings, original and new are the original and the replacement value.
[ "The", "standardized", "Toolchain", "Spec", "Entries", "compile", "function" ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/toolchain.py#L486-L526
train
45,795
calmjs/calmjs
src/calmjs/toolchain.py
process_compile_entries
def process_compile_entries( processor, spec, entries, modpath_logger=None, targetpath_logger=None): """ The generalized raw spec entry process invocation loop. """ # Contains a mapping of the module name to the compiled file's # relative path starting from the base build_dir. all_modpaths = {} all_targets = {} # List of exported module names, should be equal to all keys of # the compiled and bundled sources. all_export_module_names = [] def update(base, fresh, logger): if callable(logger): for dupes in dict_update_overwrite_check(base, fresh): logger(*dupes) else: base.update(fresh) for entry in entries: modpaths, targetpaths, export_module_names = processor(spec, entry) update(all_modpaths, modpaths, modpath_logger) update(all_targets, targetpaths, targetpath_logger) all_export_module_names.extend(export_module_names) return all_modpaths, all_targets, all_export_module_names
python
def process_compile_entries( processor, spec, entries, modpath_logger=None, targetpath_logger=None): """ The generalized raw spec entry process invocation loop. """ # Contains a mapping of the module name to the compiled file's # relative path starting from the base build_dir. all_modpaths = {} all_targets = {} # List of exported module names, should be equal to all keys of # the compiled and bundled sources. all_export_module_names = [] def update(base, fresh, logger): if callable(logger): for dupes in dict_update_overwrite_check(base, fresh): logger(*dupes) else: base.update(fresh) for entry in entries: modpaths, targetpaths, export_module_names = processor(spec, entry) update(all_modpaths, modpaths, modpath_logger) update(all_targets, targetpaths, targetpath_logger) all_export_module_names.extend(export_module_names) return all_modpaths, all_targets, all_export_module_names
[ "def", "process_compile_entries", "(", "processor", ",", "spec", ",", "entries", ",", "modpath_logger", "=", "None", ",", "targetpath_logger", "=", "None", ")", ":", "# Contains a mapping of the module name to the compiled file's", "# relative path starting from the base build_...
The generalized raw spec entry process invocation loop.
[ "The", "generalized", "raw", "spec", "entry", "process", "invocation", "loop", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/toolchain.py#L529-L556
train
45,796
calmjs/calmjs
src/calmjs/toolchain.py
Spec.update_selected
def update_selected(self, other, selected): """ Like update, however a list of selected keys must be provided. """ self.update({k: other[k] for k in selected})
python
def update_selected(self, other, selected): """ Like update, however a list of selected keys must be provided. """ self.update({k: other[k] for k in selected})
[ "def", "update_selected", "(", "self", ",", "other", ",", "selected", ")", ":", "self", ".", "update", "(", "{", "k", ":", "other", "[", "k", "]", "for", "k", "in", "selected", "}", ")" ]
Like update, however a list of selected keys must be provided.
[ "Like", "update", "however", "a", "list", "of", "selected", "keys", "must", "be", "provided", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/toolchain.py#L639-L644
train
45,797
calmjs/calmjs
src/calmjs/toolchain.py
Spec.advise
def advise(self, name, f, *a, **kw): """ Add an advice that will be handled later by the handle method. Arguments: name The name of the advice group f A callable method or function. The rest of the arguments will be passed as arguments and keyword arguments to f when it's invoked. """ if name is None: return advice = (f, a, kw) debug = self.get(DEBUG) frame = currentframe() if frame is None: logger.debug('currentframe() failed to return frame') else: if name in self._called: self.__advice_stack_frame_protection(frame) if debug: logger.debug( "advise '%s' invoked by %s:%d", name, frame.f_back.f_code.co_filename, frame.f_back.f_lineno, ) if debug > 1: # use the memory address of the tuple which should # be stable self._frames[id(advice)] = ''.join( format_stack(frame.f_back)) self._advices[name] = self._advices.get(name, []) self._advices[name].append(advice)
python
def advise(self, name, f, *a, **kw): """ Add an advice that will be handled later by the handle method. Arguments: name The name of the advice group f A callable method or function. The rest of the arguments will be passed as arguments and keyword arguments to f when it's invoked. """ if name is None: return advice = (f, a, kw) debug = self.get(DEBUG) frame = currentframe() if frame is None: logger.debug('currentframe() failed to return frame') else: if name in self._called: self.__advice_stack_frame_protection(frame) if debug: logger.debug( "advise '%s' invoked by %s:%d", name, frame.f_back.f_code.co_filename, frame.f_back.f_lineno, ) if debug > 1: # use the memory address of the tuple which should # be stable self._frames[id(advice)] = ''.join( format_stack(frame.f_back)) self._advices[name] = self._advices.get(name, []) self._advices[name].append(advice)
[ "def", "advise", "(", "self", ",", "name", ",", "f", ",", "*", "a", ",", "*", "*", "kw", ")", ":", "if", "name", "is", "None", ":", "return", "advice", "=", "(", "f", ",", "a", ",", "kw", ")", "debug", "=", "self", ".", "get", "(", "DEBUG",...
Add an advice that will be handled later by the handle method. Arguments: name The name of the advice group f A callable method or function. The rest of the arguments will be passed as arguments and keyword arguments to f when it's invoked.
[ "Add", "an", "advice", "that", "will", "be", "handled", "later", "by", "the", "handle", "method", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/toolchain.py#L666-L706
train
45,798
calmjs/calmjs
src/calmjs/toolchain.py
Spec.handle
def handle(self, name): """ Call all advices at the provided name. This has an analogue in the join point in aspected oriented programming, but the analogy is a weak one as we don't have the proper metaobject protocol to support this. Implementation that make use of this system should make it clear that they will call this method with name associated with its group before and after its execution, or that the method at hand that want this invoked be called by this other conductor method. For the Toolchain standard steps (prepare, compile, assemble, link and finalize), this handle method will only be called by invoking the toolchain as a callable. Calling those methods piecemeal will not trigger the invocation, even though it probably should. Modules, classes and methods that desire to call their own handler should instead follow the convention where the handle be called before and after with the appropriate names. For instance: def test(self, spec): spec.handle(BEFORE_TEST) # do the things spec.handle(AFTER_TEST) This arrangement will need to be revisited when a proper system is written at the metaclass level. Arguments: name The name of the advices group. All the callables registered to this group will be invoked, last-in-first-out. """ if name in self._called: logger.warning( "advice group '%s' has been called for this spec %r", name, self, ) # only now ensure checking self.__advice_stack_frame_protection(currentframe()) else: self._called.add(name) # Get a complete clone, so indirect manipulation done to the # reference that others have access to will not have an effect # within the scope of this execution. Please refer to the # test_toolchain, test_spec_advice_no_infinite_pop test case. 
advices = [] advices.extend(self._advices.get(name, [])) if advices and self.get('debug'): logger.debug( "handling %d advices in group '%s' ", len(advices), name) while advices: try: # cleanup basically done lifo (last in first out) values = advices.pop() advice, a, kw = values if not ((callable(advice)) and isinstance(a, tuple) and isinstance(kw, dict)): raise TypeError except ValueError: logger.info('Spec advice extraction error: got %s', values) except TypeError: logger.info('Spec advice malformed: got %s', values) else: try: try: advice(*a, **kw) except Exception as e: # get that back by the id. frame = self._frames.get(id(values)) if frame: logger.info('Spec advice exception: %r', e) logger.info( 'Traceback for original advice:\n%s', frame) # continue on for the normal exception raise except AdviceCancel as e: logger.info( "advice %s in group '%s' signaled its cancellation " "during its execution: %s", advice, name, e ) if self.get(DEBUG): logger.debug( 'showing traceback for cancellation', exc_info=1, ) except AdviceAbort as e: # this is a signaled error with a planned abortion logger.warning( "advice %s in group '%s' encountered a known error " "during its execution: %s; continuing with toolchain " "execution", advice, name, e ) if self.get(DEBUG): logger.warning( 'showing traceback for error', exc_info=1, ) except ToolchainCancel: # this is the safe cancel raise except ToolchainAbort as e: logger.critical( "an advice in group '%s' triggered an abort: %s", name, str(e) ) raise except KeyboardInterrupt: raise ToolchainCancel('interrupted') except Exception as e: # a completely unplanned failure logger.critical( "advice %s in group '%s' terminated due to an " "unexpected exception: %s", advice, name, e ) if self.get(DEBUG): logger.critical( 'showing traceback for error', exc_info=1, )
python
def handle(self, name): """ Call all advices at the provided name. This has an analogue in the join point in aspected oriented programming, but the analogy is a weak one as we don't have the proper metaobject protocol to support this. Implementation that make use of this system should make it clear that they will call this method with name associated with its group before and after its execution, or that the method at hand that want this invoked be called by this other conductor method. For the Toolchain standard steps (prepare, compile, assemble, link and finalize), this handle method will only be called by invoking the toolchain as a callable. Calling those methods piecemeal will not trigger the invocation, even though it probably should. Modules, classes and methods that desire to call their own handler should instead follow the convention where the handle be called before and after with the appropriate names. For instance: def test(self, spec): spec.handle(BEFORE_TEST) # do the things spec.handle(AFTER_TEST) This arrangement will need to be revisited when a proper system is written at the metaclass level. Arguments: name The name of the advices group. All the callables registered to this group will be invoked, last-in-first-out. """ if name in self._called: logger.warning( "advice group '%s' has been called for this spec %r", name, self, ) # only now ensure checking self.__advice_stack_frame_protection(currentframe()) else: self._called.add(name) # Get a complete clone, so indirect manipulation done to the # reference that others have access to will not have an effect # within the scope of this execution. Please refer to the # test_toolchain, test_spec_advice_no_infinite_pop test case. 
advices = [] advices.extend(self._advices.get(name, [])) if advices and self.get('debug'): logger.debug( "handling %d advices in group '%s' ", len(advices), name) while advices: try: # cleanup basically done lifo (last in first out) values = advices.pop() advice, a, kw = values if not ((callable(advice)) and isinstance(a, tuple) and isinstance(kw, dict)): raise TypeError except ValueError: logger.info('Spec advice extraction error: got %s', values) except TypeError: logger.info('Spec advice malformed: got %s', values) else: try: try: advice(*a, **kw) except Exception as e: # get that back by the id. frame = self._frames.get(id(values)) if frame: logger.info('Spec advice exception: %r', e) logger.info( 'Traceback for original advice:\n%s', frame) # continue on for the normal exception raise except AdviceCancel as e: logger.info( "advice %s in group '%s' signaled its cancellation " "during its execution: %s", advice, name, e ) if self.get(DEBUG): logger.debug( 'showing traceback for cancellation', exc_info=1, ) except AdviceAbort as e: # this is a signaled error with a planned abortion logger.warning( "advice %s in group '%s' encountered a known error " "during its execution: %s; continuing with toolchain " "execution", advice, name, e ) if self.get(DEBUG): logger.warning( 'showing traceback for error', exc_info=1, ) except ToolchainCancel: # this is the safe cancel raise except ToolchainAbort as e: logger.critical( "an advice in group '%s' triggered an abort: %s", name, str(e) ) raise except KeyboardInterrupt: raise ToolchainCancel('interrupted') except Exception as e: # a completely unplanned failure logger.critical( "advice %s in group '%s' terminated due to an " "unexpected exception: %s", advice, name, e ) if self.get(DEBUG): logger.critical( 'showing traceback for error', exc_info=1, )
[ "def", "handle", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_called", ":", "logger", ".", "warning", "(", "\"advice group '%s' has been called for this spec %r\"", ",", "name", ",", "self", ",", ")", "# only now ensure checking", "self...
Call all advices at the provided name. This has an analogue in the join point in aspected oriented programming, but the analogy is a weak one as we don't have the proper metaobject protocol to support this. Implementation that make use of this system should make it clear that they will call this method with name associated with its group before and after its execution, or that the method at hand that want this invoked be called by this other conductor method. For the Toolchain standard steps (prepare, compile, assemble, link and finalize), this handle method will only be called by invoking the toolchain as a callable. Calling those methods piecemeal will not trigger the invocation, even though it probably should. Modules, classes and methods that desire to call their own handler should instead follow the convention where the handle be called before and after with the appropriate names. For instance: def test(self, spec): spec.handle(BEFORE_TEST) # do the things spec.handle(AFTER_TEST) This arrangement will need to be revisited when a proper system is written at the metaclass level. Arguments: name The name of the advices group. All the callables registered to this group will be invoked, last-in-first-out.
[ "Call", "all", "advices", "at", "the", "provided", "name", "." ]
b9b407c2b6a7662da64bccba93bb8d92e7a5fafd
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/toolchain.py#L708-L831
train
45,799