repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
azraq27/neural
neural/dsets.py
nifti_copy
def nifti_copy(filename,prefix=None,gzip=True): ''' creates a ``.nii`` copy of the given dataset and returns the filename as a string''' # I know, my argument ``prefix`` clobbers the global method... but it makes my arguments look nice and clean if prefix==None: prefix = filename nifti_filename = globals()['prefix'](prefix) + ".nii" if gzip: nifti_filename += '.gz' if not os.path.exists(nifti_filename): try: subprocess.check_call(['3dAFNItoNIFTI','-prefix',nifti_filename,str(filename)]) except subprocess.CalledProcessError: nl.notify('Error: could not convert "%s" to NIFTI dset!' % filename,level=nl.level.error) return None return nifti_filename
python
def nifti_copy(filename,prefix=None,gzip=True): ''' creates a ``.nii`` copy of the given dataset and returns the filename as a string''' # I know, my argument ``prefix`` clobbers the global method... but it makes my arguments look nice and clean if prefix==None: prefix = filename nifti_filename = globals()['prefix'](prefix) + ".nii" if gzip: nifti_filename += '.gz' if not os.path.exists(nifti_filename): try: subprocess.check_call(['3dAFNItoNIFTI','-prefix',nifti_filename,str(filename)]) except subprocess.CalledProcessError: nl.notify('Error: could not convert "%s" to NIFTI dset!' % filename,level=nl.level.error) return None return nifti_filename
[ "def", "nifti_copy", "(", "filename", ",", "prefix", "=", "None", ",", "gzip", "=", "True", ")", ":", "# I know, my argument ``prefix`` clobbers the global method... but it makes my arguments look nice and clean", "if", "prefix", "==", "None", ":", "prefix", "=", "filenam...
creates a ``.nii`` copy of the given dataset and returns the filename as a string
[ "creates", "a", ".", "nii", "copy", "of", "the", "given", "dataset", "and", "returns", "the", "filename", "as", "a", "string" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L28-L42
azraq27/neural
neural/dsets.py
dset_copy
def dset_copy(dset,to_dir): '''robust way to copy a dataset (including AFNI briks)''' if nl.is_afni(dset): dset_strip = re.sub(r'\.(HEAD|BRIK)?(\.(gz|bz))?','',dset) for dset_file in [dset_strip + '.HEAD'] + glob.glob(dset_strip + '.BRIK*'): if os.path.exists(dset_file): shutil.copy(dset_file,to_dir) else: if os.path.exists(dset): shutil.copy(dset,to_dir) else: nl.notify('Warning: couldn\'t find file %s to copy to %s' %(dset,to_dir),level=nl.level.warning)
python
def dset_copy(dset,to_dir): '''robust way to copy a dataset (including AFNI briks)''' if nl.is_afni(dset): dset_strip = re.sub(r'\.(HEAD|BRIK)?(\.(gz|bz))?','',dset) for dset_file in [dset_strip + '.HEAD'] + glob.glob(dset_strip + '.BRIK*'): if os.path.exists(dset_file): shutil.copy(dset_file,to_dir) else: if os.path.exists(dset): shutil.copy(dset,to_dir) else: nl.notify('Warning: couldn\'t find file %s to copy to %s' %(dset,to_dir),level=nl.level.warning)
[ "def", "dset_copy", "(", "dset", ",", "to_dir", ")", ":", "if", "nl", ".", "is_afni", "(", "dset", ")", ":", "dset_strip", "=", "re", ".", "sub", "(", "r'\\.(HEAD|BRIK)?(\\.(gz|bz))?'", ",", "''", ",", "dset", ")", "for", "dset_file", "in", "[", "dset_...
robust way to copy a dataset (including AFNI briks)
[ "robust", "way", "to", "copy", "a", "dataset", "(", "including", "AFNI", "briks", ")" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L110-L121
azraq27/neural
neural/dsets.py
_dset_info_afni
def _dset_info_afni(dset): ''' returns raw output from running ``3dinfo`` ''' info = DsetInfo() try: raw_info = subprocess.check_output(['3dinfo','-verb',str(dset)],stderr=subprocess.STDOUT) except: return None if raw_info==None: return None # Subbrick info: sub_pattern = r'At sub-brick #(\d+) \'([^\']+)\' datum type is (\w+)(:\s+(.*)\s+to\s+(.*))?\n(.*statcode = (\w+); statpar = (.*)|)' sub_info = re.findall(sub_pattern,raw_info) for brick in sub_info: brick_info = { 'index': int(brick[0]), 'label': brick[1], 'datum': brick[2] } if brick[3]!='': brick_info.update({ 'min': float(brick[4]), 'max': float(brick[5]) }) if brick[6]!='': brick_info.update({ 'stat': brick[7], 'params': brick[8].split() }) info.subbricks.append(brick_info) info.reps = len(info.subbricks) # Dimensions: orient = re.search('\[-orient ([A-Z]+)\]',raw_info) if orient: info.orient = orient.group(1) for axis in ['RL','AP','IS']: m = re.search(r'%s-to-%s extent:\s+([0-9-.]+) \[.\] -to-\s+([0-9-.]+) \[.\] -step-\s+([0-9-.]+) mm \[\s*([0-9]+) voxels\]' % (axis[0],axis[1]),raw_info) if m: info.spatial_from.append(float(m.group(1))) info.spatial_to.append(float(m.group(2))) info.voxel_size.append(float(m.group(3))) info.voxel_dims.append(float(m.group(4))) if len(info.voxel_size)==3: info.voxel_volume = reduce(mul,info.voxel_size) slice_timing = re.findall('-time:[tz][tz] \d+ \d+ [0-9.]+ (.*?) ',raw_info) if len(slice_timing): info.slice_timing = slice_timing[0] TR = re.findall('Time step = ([0-9.]+)s',raw_info) if len(TR): info.TR = float(TR[0]) # Other info.. details_regex = { 'identifier': r'Identifier Code:\s+(.*)', 'filetype': r'Storage Mode:\s+(.*)', 'space': r'Template Space:\s+(.*)' } for d in details_regex: m = re.search(details_regex[d],raw_info) if m: setattr(info,d,m.group(1)) return info
python
def _dset_info_afni(dset): ''' returns raw output from running ``3dinfo`` ''' info = DsetInfo() try: raw_info = subprocess.check_output(['3dinfo','-verb',str(dset)],stderr=subprocess.STDOUT) except: return None if raw_info==None: return None # Subbrick info: sub_pattern = r'At sub-brick #(\d+) \'([^\']+)\' datum type is (\w+)(:\s+(.*)\s+to\s+(.*))?\n(.*statcode = (\w+); statpar = (.*)|)' sub_info = re.findall(sub_pattern,raw_info) for brick in sub_info: brick_info = { 'index': int(brick[0]), 'label': brick[1], 'datum': brick[2] } if brick[3]!='': brick_info.update({ 'min': float(brick[4]), 'max': float(brick[5]) }) if brick[6]!='': brick_info.update({ 'stat': brick[7], 'params': brick[8].split() }) info.subbricks.append(brick_info) info.reps = len(info.subbricks) # Dimensions: orient = re.search('\[-orient ([A-Z]+)\]',raw_info) if orient: info.orient = orient.group(1) for axis in ['RL','AP','IS']: m = re.search(r'%s-to-%s extent:\s+([0-9-.]+) \[.\] -to-\s+([0-9-.]+) \[.\] -step-\s+([0-9-.]+) mm \[\s*([0-9]+) voxels\]' % (axis[0],axis[1]),raw_info) if m: info.spatial_from.append(float(m.group(1))) info.spatial_to.append(float(m.group(2))) info.voxel_size.append(float(m.group(3))) info.voxel_dims.append(float(m.group(4))) if len(info.voxel_size)==3: info.voxel_volume = reduce(mul,info.voxel_size) slice_timing = re.findall('-time:[tz][tz] \d+ \d+ [0-9.]+ (.*?) ',raw_info) if len(slice_timing): info.slice_timing = slice_timing[0] TR = re.findall('Time step = ([0-9.]+)s',raw_info) if len(TR): info.TR = float(TR[0]) # Other info.. details_regex = { 'identifier': r'Identifier Code:\s+(.*)', 'filetype': r'Storage Mode:\s+(.*)', 'space': r'Template Space:\s+(.*)' } for d in details_regex: m = re.search(details_regex[d],raw_info) if m: setattr(info,d,m.group(1)) return info
[ "def", "_dset_info_afni", "(", "dset", ")", ":", "info", "=", "DsetInfo", "(", ")", "try", ":", "raw_info", "=", "subprocess", ".", "check_output", "(", "[", "'3dinfo'", ",", "'-verb'", ",", "str", "(", "dset", ")", "]", ",", "stderr", "=", "subprocess...
returns raw output from running ``3dinfo``
[ "returns", "raw", "output", "from", "running", "3dinfo" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L145-L208
azraq27/neural
neural/dsets.py
dset_info
def dset_info(dset): '''returns a :class:`DsetInfo` object containing the meta-data from ``dset``''' if nl.pkg_available('afni'): return _dset_info_afni(dset) nl.notify('Error: no packages available to get dset info',level=nl.level.error) return None
python
def dset_info(dset): '''returns a :class:`DsetInfo` object containing the meta-data from ``dset``''' if nl.pkg_available('afni'): return _dset_info_afni(dset) nl.notify('Error: no packages available to get dset info',level=nl.level.error) return None
[ "def", "dset_info", "(", "dset", ")", ":", "if", "nl", ".", "pkg_available", "(", "'afni'", ")", ":", "return", "_dset_info_afni", "(", "dset", ")", "nl", ".", "notify", "(", "'Error: no packages available to get dset info'", ",", "level", "=", "nl", ".", "l...
returns a :class:`DsetInfo` object containing the meta-data from ``dset``
[ "returns", "a", ":", "class", ":", "DsetInfo", "object", "containing", "the", "meta", "-", "data", "from", "dset" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L210-L215
azraq27/neural
neural/dsets.py
subbrick
def subbrick(dset,label,coef=False,tstat=False,fstat=False,rstat=False,number_only=False): ''' returns a string referencing the given subbrick within a dset This method reads the header of the dataset ``dset``, finds the subbrick whose label matches ``label`` and returns a string of type ``dataset[X]``, which can be used by most AFNI programs to refer to a subbrick within a file The options coef, tstat, fstat, and rstat will add the suffix that is appended to the label by 3dDeconvolve :coef: "#0_Coef" :tstat: "#0_Tstat" :fstat: "_Fstat" :rstat: "_R^2" If ``coef`` or ``tstat`` are set to a number, it will use that parameter number (instead of 0), for models that use multiple parameters (e.g., "TENT"). if ``number_only`` is set to ``True``, will only return the subbrick number instead of a string ''' if coef is not False: if coef is True: coef = 0 label += "#%d_Coef" % coef elif tstat != False: if tstat==True: tstat = 0 label += "#%d_Tstat" % tstat elif fstat: label += "_Fstat" elif rstat: label += "_R^2" info = nl.dset_info(dset) if info==None: nl.notify('Error: Couldn\'t get info from dset "%s"'%dset,level=nl.level.error) return None i = info.subbrick_labeled(label) if number_only: return i return '%s[%d]' % (dset,i)
python
def subbrick(dset,label,coef=False,tstat=False,fstat=False,rstat=False,number_only=False): ''' returns a string referencing the given subbrick within a dset This method reads the header of the dataset ``dset``, finds the subbrick whose label matches ``label`` and returns a string of type ``dataset[X]``, which can be used by most AFNI programs to refer to a subbrick within a file The options coef, tstat, fstat, and rstat will add the suffix that is appended to the label by 3dDeconvolve :coef: "#0_Coef" :tstat: "#0_Tstat" :fstat: "_Fstat" :rstat: "_R^2" If ``coef`` or ``tstat`` are set to a number, it will use that parameter number (instead of 0), for models that use multiple parameters (e.g., "TENT"). if ``number_only`` is set to ``True``, will only return the subbrick number instead of a string ''' if coef is not False: if coef is True: coef = 0 label += "#%d_Coef" % coef elif tstat != False: if tstat==True: tstat = 0 label += "#%d_Tstat" % tstat elif fstat: label += "_Fstat" elif rstat: label += "_R^2" info = nl.dset_info(dset) if info==None: nl.notify('Error: Couldn\'t get info from dset "%s"'%dset,level=nl.level.error) return None i = info.subbrick_labeled(label) if number_only: return i return '%s[%d]' % (dset,i)
[ "def", "subbrick", "(", "dset", ",", "label", ",", "coef", "=", "False", ",", "tstat", "=", "False", ",", "fstat", "=", "False", ",", "rstat", "=", "False", ",", "number_only", "=", "False", ")", ":", "if", "coef", "is", "not", "False", ":", "if", ...
returns a string referencing the given subbrick within a dset This method reads the header of the dataset ``dset``, finds the subbrick whose label matches ``label`` and returns a string of type ``dataset[X]``, which can be used by most AFNI programs to refer to a subbrick within a file The options coef, tstat, fstat, and rstat will add the suffix that is appended to the label by 3dDeconvolve :coef: "#0_Coef" :tstat: "#0_Tstat" :fstat: "_Fstat" :rstat: "_R^2" If ``coef`` or ``tstat`` are set to a number, it will use that parameter number (instead of 0), for models that use multiple parameters (e.g., "TENT"). if ``number_only`` is set to ``True``, will only return the subbrick number instead of a string
[ "returns", "a", "string", "referencing", "the", "given", "subbrick", "within", "a", "dset" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L222-L263
azraq27/neural
neural/dsets.py
dset_grids_equal
def dset_grids_equal(dsets): '''Tests if each dataset in the ``list`` ``dsets`` has the same number of voxels and voxel-widths''' infos = [dset_info(dset) for dset in dsets] for i in xrange(3): if len(set([x.voxel_size[i] for x in infos]))>1 or len(set([x.voxel_dims[i] for x in infos]))>1: return False return True
python
def dset_grids_equal(dsets): '''Tests if each dataset in the ``list`` ``dsets`` has the same number of voxels and voxel-widths''' infos = [dset_info(dset) for dset in dsets] for i in xrange(3): if len(set([x.voxel_size[i] for x in infos]))>1 or len(set([x.voxel_dims[i] for x in infos]))>1: return False return True
[ "def", "dset_grids_equal", "(", "dsets", ")", ":", "infos", "=", "[", "dset_info", "(", "dset", ")", "for", "dset", "in", "dsets", "]", "for", "i", "in", "xrange", "(", "3", ")", ":", "if", "len", "(", "set", "(", "[", "x", ".", "voxel_size", "["...
Tests if each dataset in the ``list`` ``dsets`` has the same number of voxels and voxel-widths
[ "Tests", "if", "each", "dataset", "in", "the", "list", "dsets", "has", "the", "same", "number", "of", "voxels", "and", "voxel", "-", "widths" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L265-L271
azraq27/neural
neural/dsets.py
resample_dset
def resample_dset(dset,template,prefix=None,resam='NN'): '''Resamples ``dset`` to the grid of ``template`` using resampling mode ``resam``. Default prefix is to suffix ``_resam`` at the end of ``dset`` Available resampling modes: :NN: Nearest Neighbor :Li: Linear :Cu: Cubic :Bk: Blocky ''' if prefix==None: prefix = nl.suffix(dset,'_resam') nl.run(['3dresample','-master',template,'-rmode',resam,'-prefix',prefix,'-inset',dset])
python
def resample_dset(dset,template,prefix=None,resam='NN'): '''Resamples ``dset`` to the grid of ``template`` using resampling mode ``resam``. Default prefix is to suffix ``_resam`` at the end of ``dset`` Available resampling modes: :NN: Nearest Neighbor :Li: Linear :Cu: Cubic :Bk: Blocky ''' if prefix==None: prefix = nl.suffix(dset,'_resam') nl.run(['3dresample','-master',template,'-rmode',resam,'-prefix',prefix,'-inset',dset])
[ "def", "resample_dset", "(", "dset", ",", "template", ",", "prefix", "=", "None", ",", "resam", "=", "'NN'", ")", ":", "if", "prefix", "==", "None", ":", "prefix", "=", "nl", ".", "suffix", "(", "dset", ",", "'_resam'", ")", "nl", ".", "run", "(", ...
Resamples ``dset`` to the grid of ``template`` using resampling mode ``resam``. Default prefix is to suffix ``_resam`` at the end of ``dset`` Available resampling modes: :NN: Nearest Neighbor :Li: Linear :Cu: Cubic :Bk: Blocky
[ "Resamples", "dset", "to", "the", "grid", "of", "template", "using", "resampling", "mode", "resam", ".", "Default", "prefix", "is", "to", "suffix", "_resam", "at", "the", "end", "of", "dset" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L273-L285
azraq27/neural
neural/dsets.py
ijk_to_xyz
def ijk_to_xyz(dset,ijk): '''convert the dset indices ``ijk`` to RAI coordinates ``xyz``''' i = nl.dset_info(dset) orient_codes = [int(x) for x in nl.run(['@AfniOrient2RAImap',i.orient]).output.split()] orient_is = [abs(x)-1 for x in orient_codes] rai = [] for rai_i in xrange(3): ijk_i = orient_is[rai_i] if orient_codes[rai_i] > 0: rai.append(ijk[ijk_i]*i.voxel_size[rai_i] + i.spatial_from[rai_i]) else: rai.append(i.spatial_to[rai_i] - ijk[ijk_i]*i.voxel_size[rai_i]) return rai
python
def ijk_to_xyz(dset,ijk): '''convert the dset indices ``ijk`` to RAI coordinates ``xyz``''' i = nl.dset_info(dset) orient_codes = [int(x) for x in nl.run(['@AfniOrient2RAImap',i.orient]).output.split()] orient_is = [abs(x)-1 for x in orient_codes] rai = [] for rai_i in xrange(3): ijk_i = orient_is[rai_i] if orient_codes[rai_i] > 0: rai.append(ijk[ijk_i]*i.voxel_size[rai_i] + i.spatial_from[rai_i]) else: rai.append(i.spatial_to[rai_i] - ijk[ijk_i]*i.voxel_size[rai_i]) return rai
[ "def", "ijk_to_xyz", "(", "dset", ",", "ijk", ")", ":", "i", "=", "nl", ".", "dset_info", "(", "dset", ")", "orient_codes", "=", "[", "int", "(", "x", ")", "for", "x", "in", "nl", ".", "run", "(", "[", "'@AfniOrient2RAImap'", ",", "i", ".", "orie...
convert the dset indices ``ijk`` to RAI coordinates ``xyz``
[ "convert", "the", "dset", "indices", "ijk", "to", "RAI", "coordinates", "xyz" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L287-L299
azraq27/neural
neural/dsets.py
bounding_box
def bounding_box(dset): '''return the coordinates (in RAI) of the corners of a box enclosing the data in ``dset``''' o = nl.run(["3dAutobox","-input",dset]) ijk_coords = re.findall(r'[xyz]=(\d+)\.\.(\d+)',o.output) from_rai = ijk_to_xyz(dset,[float(x[0]) for x in ijk_coords]) to_rai = ijk_to_xyz(dset,[float(x[1]) for x in ijk_coords]) return (from_rai,to_rai)
python
def bounding_box(dset): '''return the coordinates (in RAI) of the corners of a box enclosing the data in ``dset``''' o = nl.run(["3dAutobox","-input",dset]) ijk_coords = re.findall(r'[xyz]=(\d+)\.\.(\d+)',o.output) from_rai = ijk_to_xyz(dset,[float(x[0]) for x in ijk_coords]) to_rai = ijk_to_xyz(dset,[float(x[1]) for x in ijk_coords]) return (from_rai,to_rai)
[ "def", "bounding_box", "(", "dset", ")", ":", "o", "=", "nl", ".", "run", "(", "[", "\"3dAutobox\"", ",", "\"-input\"", ",", "dset", "]", ")", "ijk_coords", "=", "re", ".", "findall", "(", "r'[xyz]=(\\d+)\\.\\.(\\d+)'", ",", "o", ".", "output", ")", "f...
return the coordinates (in RAI) of the corners of a box enclosing the data in ``dset``
[ "return", "the", "coordinates", "(", "in", "RAI", ")", "of", "the", "corners", "of", "a", "box", "enclosing", "the", "data", "in", "dset" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L301-L307
azraq27/neural
neural/dsets.py
value_at_coord
def value_at_coord(dset,coords): '''returns value at specified coordinate in ``dset``''' return nl.numberize(nl.run(['3dmaskave','-q','-dbox'] + list(coords) + [dset],stderr=None).output)
python
def value_at_coord(dset,coords): '''returns value at specified coordinate in ``dset``''' return nl.numberize(nl.run(['3dmaskave','-q','-dbox'] + list(coords) + [dset],stderr=None).output)
[ "def", "value_at_coord", "(", "dset", ",", "coords", ")", ":", "return", "nl", ".", "numberize", "(", "nl", ".", "run", "(", "[", "'3dmaskave'", ",", "'-q'", ",", "'-dbox'", "]", "+", "list", "(", "coords", ")", "+", "[", "dset", "]", ",", "stderr"...
returns value at specified coordinate in ``dset``
[ "returns", "value", "at", "specified", "coordinate", "in", "dset" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L309-L311
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_printActivities
def do_printActivities(self,args): """Print scaling activities""" parser = CommandArgumentParser("printActivities") parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh'); args = vars(parser.parse_args(args)) refresh = args['refresh'] or not self.activities if refresh: response = self.client.describe_scaling_activities(AutoScalingGroupName=self.scalingGroup) self.activities = response['Activities'] index = 0 for activity in self.activities: print "{}: {} -> {} {}: {}".format(index,activity['StartTime'],stdplus.defaultifyDict(activity,'EndTime',''),activity['StatusCode'],activity['Description']) index = index + 1
python
def do_printActivities(self,args): """Print scaling activities""" parser = CommandArgumentParser("printActivities") parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh'); args = vars(parser.parse_args(args)) refresh = args['refresh'] or not self.activities if refresh: response = self.client.describe_scaling_activities(AutoScalingGroupName=self.scalingGroup) self.activities = response['Activities'] index = 0 for activity in self.activities: print "{}: {} -> {} {}: {}".format(index,activity['StartTime'],stdplus.defaultifyDict(activity,'EndTime',''),activity['StatusCode'],activity['Description']) index = index + 1
[ "def", "do_printActivities", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"printActivities\"", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--refresh'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'re...
Print scaling activities
[ "Print", "scaling", "activities" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L18-L32
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_printActivity
def do_printActivity(self,args): """Print scaling activity details""" parser = CommandArgumentParser("printActivity") parser.add_argument(dest='index',type=int,help='refresh'); args = vars(parser.parse_args(args)) index = args['index'] activity = self.activities[index] pprint(activity)
python
def do_printActivity(self,args): """Print scaling activity details""" parser = CommandArgumentParser("printActivity") parser.add_argument(dest='index',type=int,help='refresh'); args = vars(parser.parse_args(args)) index = args['index'] activity = self.activities[index] pprint(activity)
[ "def", "do_printActivity", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"printActivity\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'index'", ",", "type", "=", "int", ",", "help", "=", "'refresh'", ")", "...
Print scaling activity details
[ "Print", "scaling", "activity", "details" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L34-L42
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_printInstances
def do_printInstances(self,args): """Print the list of instances in this auto scaling group. printInstances -h for detailed help""" parser = CommandArgumentParser("printInstances") parser.add_argument(dest='filters',nargs='*',default=["*"],help='Filter instances'); parser.add_argument('-a','--addresses',action='store_true',dest='addresses',help='list all ip addresses'); parser.add_argument('-t','--tags',action='store_true',dest='tags',help='list all instance tags'); parser.add_argument('-d','--allDetails',action='store_true',dest='details',help='print all instance details'); parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh'); parser.add_argument('-z','--zones',dest='availabilityZones',nargs='+',help='Only include specified availability zones'); args = vars(parser.parse_args(args)) client = AwsConnectionFactory.getEc2Client() filters = args['filters'] addresses = args['addresses'] tags = args['tags'] details = args['details'] availabilityZones = args['availabilityZones'] needDescription = addresses or tags or details if args['refresh']: self.scalingGroupDescription = self.client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.scalingGroup]) # print "AutoScaling Group:{}".format(self.scalingGroup) print "=== Instances ===" instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instances = filter( lambda x: fnmatches(x['InstanceId'],filters),instances) if availabilityZones: instances = filter( lambda x: fnmatches(x['AvailabilityZone'],availabilityZones),instances) index = 0 for instance in instances: instance['index'] = index print "* {0:3d} {1} {2} {3}".format(index,instance['HealthStatus'],instance['AvailabilityZone'],instance['InstanceId']) description = None if needDescription: description = client.describe_instances(InstanceIds=[instance['InstanceId']]) if addresses: networkInterfaces = description['Reservations'][0]['Instances'][0]['NetworkInterfaces'] number = 0 print " Network Interfaces:" 
for interface in networkInterfaces: print " * {0:3d} {1}".format(number, interface['PrivateIpAddress']) number +=1 if tags: tags = description['Reservations'][0]['Instances'][0]['Tags'] print " Tags:" for tag in tags: print " * {0} {1}".format(tag['Key'],tag['Value']) if details: pprint(description) index += 1
python
def do_printInstances(self,args): """Print the list of instances in this auto scaling group. printInstances -h for detailed help""" parser = CommandArgumentParser("printInstances") parser.add_argument(dest='filters',nargs='*',default=["*"],help='Filter instances'); parser.add_argument('-a','--addresses',action='store_true',dest='addresses',help='list all ip addresses'); parser.add_argument('-t','--tags',action='store_true',dest='tags',help='list all instance tags'); parser.add_argument('-d','--allDetails',action='store_true',dest='details',help='print all instance details'); parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh'); parser.add_argument('-z','--zones',dest='availabilityZones',nargs='+',help='Only include specified availability zones'); args = vars(parser.parse_args(args)) client = AwsConnectionFactory.getEc2Client() filters = args['filters'] addresses = args['addresses'] tags = args['tags'] details = args['details'] availabilityZones = args['availabilityZones'] needDescription = addresses or tags or details if args['refresh']: self.scalingGroupDescription = self.client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.scalingGroup]) # print "AutoScaling Group:{}".format(self.scalingGroup) print "=== Instances ===" instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instances = filter( lambda x: fnmatches(x['InstanceId'],filters),instances) if availabilityZones: instances = filter( lambda x: fnmatches(x['AvailabilityZone'],availabilityZones),instances) index = 0 for instance in instances: instance['index'] = index print "* {0:3d} {1} {2} {3}".format(index,instance['HealthStatus'],instance['AvailabilityZone'],instance['InstanceId']) description = None if needDescription: description = client.describe_instances(InstanceIds=[instance['InstanceId']]) if addresses: networkInterfaces = description['Reservations'][0]['Instances'][0]['NetworkInterfaces'] number = 0 print " Network Interfaces:" 
for interface in networkInterfaces: print " * {0:3d} {1}".format(number, interface['PrivateIpAddress']) number +=1 if tags: tags = description['Reservations'][0]['Instances'][0]['Tags'] print " Tags:" for tag in tags: print " * {0} {1}".format(tag['Key'],tag['Value']) if details: pprint(description) index += 1
[ "def", "do_printInstances", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"printInstances\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'filters'", ",", "nargs", "=", "'*'", ",", "default", "=", "[", "\"*\"",...
Print the list of instances in this auto scaling group. printInstances -h for detailed help
[ "Print", "the", "list", "of", "instances", "in", "this", "auto", "scaling", "group", ".", "printInstances", "-", "h", "for", "detailed", "help" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L44-L97
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_printPolicy
def do_printPolicy(self,args): """Print the autoscaling policy""" parser = CommandArgumentParser("printPolicy") args = vars(parser.parse_args(args)) policy = self.client.describe_policies(AutoScalingGroupName=self.scalingGroup) pprint(policy)
python
def do_printPolicy(self,args): """Print the autoscaling policy""" parser = CommandArgumentParser("printPolicy") args = vars(parser.parse_args(args)) policy = self.client.describe_policies(AutoScalingGroupName=self.scalingGroup) pprint(policy)
[ "def", "do_printPolicy", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"printPolicy\"", ")", "args", "=", "vars", "(", "parser", ".", "parse_args", "(", "args", ")", ")", "policy", "=", "self", ".", "client", ".", "de...
Print the autoscaling policy
[ "Print", "the", "autoscaling", "policy" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L99-L105
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_rebootInstance
def do_rebootInstance(self,args): """Restart specified instance""" parser = CommandArgumentParser("rebootInstance") parser.add_argument(dest='instance',help='instance index or name'); args = vars(parser.parse_args(args)) instanceId = args['instance'] try: index = int(instanceId) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instanceId = instances[index] except ValueError: pass client = AwsConnectionFactory.getEc2Client() client.reboot_instances(InstanceIds=[instanceId['InstanceId']])
python
def do_rebootInstance(self,args): """Restart specified instance""" parser = CommandArgumentParser("rebootInstance") parser.add_argument(dest='instance',help='instance index or name'); args = vars(parser.parse_args(args)) instanceId = args['instance'] try: index = int(instanceId) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instanceId = instances[index] except ValueError: pass client = AwsConnectionFactory.getEc2Client() client.reboot_instances(InstanceIds=[instanceId['InstanceId']])
[ "def", "do_rebootInstance", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"rebootInstance\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'instance'", ",", "help", "=", "'instance index or name'", ")", "args", "=",...
Restart specified instance
[ "Restart", "specified", "instance" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L108-L123
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_setDesiredCapacity
def do_setDesiredCapacity(self,args): """Set the desired capacity""" parser = CommandArgumentParser("setDesiredCapacity") parser.add_argument(dest='value',type=int,help='new value'); args = vars(parser.parse_args(args)) value = int(args['value']) print "Setting desired capacity to {}".format(value) client = AwsConnectionFactory.getAsgClient() client.set_desired_capacity(AutoScalingGroupName=self.scalingGroup,DesiredCapacity=value,HonorCooldown=True) print "Scaling activity in progress"
python
def do_setDesiredCapacity(self,args): """Set the desired capacity""" parser = CommandArgumentParser("setDesiredCapacity") parser.add_argument(dest='value',type=int,help='new value'); args = vars(parser.parse_args(args)) value = int(args['value']) print "Setting desired capacity to {}".format(value) client = AwsConnectionFactory.getAsgClient() client.set_desired_capacity(AutoScalingGroupName=self.scalingGroup,DesiredCapacity=value,HonorCooldown=True) print "Scaling activity in progress"
[ "def", "do_setDesiredCapacity", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"setDesiredCapacity\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'value'", ",", "type", "=", "int", ",", "help", "=", "'new value'"...
Set the desired capacity
[ "Set", "the", "desired", "capacity" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L126-L136
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_run
def do_run(self,args): """SSH to each instance in turn and run specified command""" parser = CommandArgumentParser("run") parser.add_argument('-R','--replace-key',dest='replaceKey',default=False,action='store_true',help="Replace the host's key. This is useful when AWS recycles an IP address you've seen before.") parser.add_argument('-Y','--keyscan',dest='keyscan',default=False,action='store_true',help="Perform a keyscan to avoid having to say 'yes' for a new host. Implies -R.") parser.add_argument('-ii','--ignore-host-key',dest='ignore-host-key',default=False,action='store_true',help='Ignore host key') parser.add_argument('-ne','--no-echo',dest='no-echo',default=False,action='store_true',help='Do not echo command') parser.add_argument(dest='command',nargs='+',help="Command to run on all hosts.") # consider adding a filter option later parser.add_argument('-v',dest='verbosity',default=0,action=VAction,nargs='?',help='Verbosity. The more instances, the more verbose'); parser.add_argument('-j',dest='jobs',type=int,default=1,help='Number of hosts to contact in parallel'); parser.add_argument('-s',dest='skip',type=int,default=0,help='Skip this many hosts'); parser.add_argument('-m',dest='macro',default=False,action='store_true',help='{command} is a series of macros to execute, not the actual command to run on the host'); args = vars(parser.parse_args(args)) replaceKey = args['replaceKey'] keyscan = args['keyscan'] verbosity = args['verbosity'] jobs = args['jobs'] skip = args['skip'] ignoreHostKey = args['ignore-host-key'] noEcho = args['no-echo'] instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instances = instances[skip:] # if replaceKey or keyscan: # for instance in instances: # stdplus.resetKnownHost(instance) if args['macro']: if len(args['command']) > 1: print("Only one macro may be specified with the -m switch.") return else: macro = args['command'][0] print("Macro:{}".format(macro)) command = Config.config['ssh-macros'][macro] else: 
command = ' '.join(args['command']) Parallel(n_jobs=jobs)( delayed(ssh)(instance['InstanceId'],0,[],replaceKey,keyscan,False,verbosity,command,ignoreHostKey=ignoreHostKey,echoCommand=not noEcho,name="{}:{}: ".format(instance['index'],instance['InstanceId'])) for instance in instances )
python
def do_run(self,args): """SSH to each instance in turn and run specified command""" parser = CommandArgumentParser("run") parser.add_argument('-R','--replace-key',dest='replaceKey',default=False,action='store_true',help="Replace the host's key. This is useful when AWS recycles an IP address you've seen before.") parser.add_argument('-Y','--keyscan',dest='keyscan',default=False,action='store_true',help="Perform a keyscan to avoid having to say 'yes' for a new host. Implies -R.") parser.add_argument('-ii','--ignore-host-key',dest='ignore-host-key',default=False,action='store_true',help='Ignore host key') parser.add_argument('-ne','--no-echo',dest='no-echo',default=False,action='store_true',help='Do not echo command') parser.add_argument(dest='command',nargs='+',help="Command to run on all hosts.") # consider adding a filter option later parser.add_argument('-v',dest='verbosity',default=0,action=VAction,nargs='?',help='Verbosity. The more instances, the more verbose'); parser.add_argument('-j',dest='jobs',type=int,default=1,help='Number of hosts to contact in parallel'); parser.add_argument('-s',dest='skip',type=int,default=0,help='Skip this many hosts'); parser.add_argument('-m',dest='macro',default=False,action='store_true',help='{command} is a series of macros to execute, not the actual command to run on the host'); args = vars(parser.parse_args(args)) replaceKey = args['replaceKey'] keyscan = args['keyscan'] verbosity = args['verbosity'] jobs = args['jobs'] skip = args['skip'] ignoreHostKey = args['ignore-host-key'] noEcho = args['no-echo'] instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instances = instances[skip:] # if replaceKey or keyscan: # for instance in instances: # stdplus.resetKnownHost(instance) if args['macro']: if len(args['command']) > 1: print("Only one macro may be specified with the -m switch.") return else: macro = args['command'][0] print("Macro:{}".format(macro)) command = Config.config['ssh-macros'][macro] else: 
command = ' '.join(args['command']) Parallel(n_jobs=jobs)( delayed(ssh)(instance['InstanceId'],0,[],replaceKey,keyscan,False,verbosity,command,ignoreHostKey=ignoreHostKey,echoCommand=not noEcho,name="{}:{}: ".format(instance['index'],instance['InstanceId'])) for instance in instances )
[ "def", "do_run", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"run\"", ")", "parser", ".", "add_argument", "(", "'-R'", ",", "'--replace-key'", ",", "dest", "=", "'replaceKey'", ",", "default", "=", "False", ",", "acti...
SSH to each instance in turn and run specified command
[ "SSH", "to", "each", "instance", "in", "turn", "and", "run", "specified", "command" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L138-L179
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_ssh
def do_ssh(self,args): """SSH to an instance. ssh -h for detailed help""" parser = CommandArgumentParser("ssh") parser.add_argument(dest='instance',help='instance index or name'); parser.add_argument('-a','--address-number',default='0',dest='interface-number',help='instance id of the instance to ssh to'); parser.add_argument('-ii','--ignore-host-key',dest='ignore-host-key',default=False,action='store_true',help='Ignore host key') parser.add_argument('-ne','--no-echo',dest='no-echo',default=False,action='store_true',help='Do not echo command') parser.add_argument('-L',dest='forwarding',nargs='*',help="port forwarding string of the form: {localport}:{host-visible-to-instance}:{remoteport} or {port}") parser.add_argument('-R','--replace-key',dest='replaceKey',default=False,action='store_true',help="Replace the host's key. This is useful when AWS recycles an IP address you've seen before.") parser.add_argument('-Y','--keyscan',dest='keyscan',default=False,action='store_true',help="Perform a keyscan to avoid having to say 'yes' for a new host. Implies -R.") parser.add_argument('-B','--background',dest='background',default=False,action='store_true',help="Run in the background. (e.g., forward an ssh session and then do other stuff in aws-shell).") parser.add_argument('-v',dest='verbosity',default=0,action=VAction,nargs='?',help='Verbosity. 
The more instances, the more verbose'); parser.add_argument('-m',dest='macro',default=False,action='store_true',help='{command} is a series of macros to execute, not the actual command to run on the host'); parser.add_argument(dest='command',nargs='*',help="Command to run on all hosts.") # consider adding a filter option later args = vars(parser.parse_args(args)) interfaceNumber = int(args['interface-number']) forwarding = args['forwarding'] replaceKey = args['replaceKey'] keyscan = args['keyscan'] background = args['background'] verbosity = args['verbosity'] ignoreHostKey = args['ignore-host-key'] noEcho = args['no-echo'] # Figure out the host to connect to: target = args['instance'] try: index = int(args['instance']) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instance = instances[index] target = instance['InstanceId'] except ValueError: # if args['instance'] is not an int, for example. pass if args['macro']: if len(args['command']) > 1: print("Only one macro may be specified with the -m switch.") return else: macro = args['command'][0] print("Macro:{}".format(macro)) command = Config.config['ssh-macros'][macro] else: command = ' '.join(args['command']) ssh(target,interfaceNumber,forwarding,replaceKey,keyscan,background,verbosity,command,ignoreHostKey=ignoreHostKey,echoCommand = not noEcho)
python
def do_ssh(self,args): """SSH to an instance. ssh -h for detailed help""" parser = CommandArgumentParser("ssh") parser.add_argument(dest='instance',help='instance index or name'); parser.add_argument('-a','--address-number',default='0',dest='interface-number',help='instance id of the instance to ssh to'); parser.add_argument('-ii','--ignore-host-key',dest='ignore-host-key',default=False,action='store_true',help='Ignore host key') parser.add_argument('-ne','--no-echo',dest='no-echo',default=False,action='store_true',help='Do not echo command') parser.add_argument('-L',dest='forwarding',nargs='*',help="port forwarding string of the form: {localport}:{host-visible-to-instance}:{remoteport} or {port}") parser.add_argument('-R','--replace-key',dest='replaceKey',default=False,action='store_true',help="Replace the host's key. This is useful when AWS recycles an IP address you've seen before.") parser.add_argument('-Y','--keyscan',dest='keyscan',default=False,action='store_true',help="Perform a keyscan to avoid having to say 'yes' for a new host. Implies -R.") parser.add_argument('-B','--background',dest='background',default=False,action='store_true',help="Run in the background. (e.g., forward an ssh session and then do other stuff in aws-shell).") parser.add_argument('-v',dest='verbosity',default=0,action=VAction,nargs='?',help='Verbosity. 
The more instances, the more verbose'); parser.add_argument('-m',dest='macro',default=False,action='store_true',help='{command} is a series of macros to execute, not the actual command to run on the host'); parser.add_argument(dest='command',nargs='*',help="Command to run on all hosts.") # consider adding a filter option later args = vars(parser.parse_args(args)) interfaceNumber = int(args['interface-number']) forwarding = args['forwarding'] replaceKey = args['replaceKey'] keyscan = args['keyscan'] background = args['background'] verbosity = args['verbosity'] ignoreHostKey = args['ignore-host-key'] noEcho = args['no-echo'] # Figure out the host to connect to: target = args['instance'] try: index = int(args['instance']) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instance = instances[index] target = instance['InstanceId'] except ValueError: # if args['instance'] is not an int, for example. pass if args['macro']: if len(args['command']) > 1: print("Only one macro may be specified with the -m switch.") return else: macro = args['command'][0] print("Macro:{}".format(macro)) command = Config.config['ssh-macros'][macro] else: command = ' '.join(args['command']) ssh(target,interfaceNumber,forwarding,replaceKey,keyscan,background,verbosity,command,ignoreHostKey=ignoreHostKey,echoCommand = not noEcho)
[ "def", "do_ssh", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"ssh\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'instance'", ",", "help", "=", "'instance index or name'", ")", "parser", ".", "add_argument", ...
SSH to an instance. ssh -h for detailed help
[ "SSH", "to", "an", "instance", ".", "ssh", "-", "h", "for", "detailed", "help" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L181-L227
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_startInstance
def do_startInstance(self,args): """Start specified instance""" parser = CommandArgumentParser("startInstance") parser.add_argument(dest='instance',help='instance index or name'); args = vars(parser.parse_args(args)) instanceId = args['instance'] force = args['force'] try: index = int(instanceId) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instanceId = instances[index] except ValueError: pass client = AwsConnectionFactory.getEc2Client() client.start_instances(InstanceIds=[instanceId['InstanceId']])
python
def do_startInstance(self,args): """Start specified instance""" parser = CommandArgumentParser("startInstance") parser.add_argument(dest='instance',help='instance index or name'); args = vars(parser.parse_args(args)) instanceId = args['instance'] force = args['force'] try: index = int(instanceId) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instanceId = instances[index] except ValueError: pass client = AwsConnectionFactory.getEc2Client() client.start_instances(InstanceIds=[instanceId['InstanceId']])
[ "def", "do_startInstance", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"startInstance\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'instance'", ",", "help", "=", "'instance index or name'", ")", "args", "=", ...
Start specified instance
[ "Start", "specified", "instance" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L229-L245
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_stopInstance
def do_stopInstance(self,args): """Stop specified instance""" parser = CommandArgumentParser("stopInstance") parser.add_argument(dest='instance',help='instance index or name'); parser.add_argument('-f','--force',action='store_true',dest='force',help='instance index or name'); args = vars(parser.parse_args(args)) instanceId = args['instance'] force = args['force'] try: index = int(instanceId) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instanceId = instances[index] except ValueError: pass client = AwsConnectionFactory.getEc2Client() client.stop_instances(InstanceIds=[instanceId['InstanceId']],Force=force)
python
def do_stopInstance(self,args): """Stop specified instance""" parser = CommandArgumentParser("stopInstance") parser.add_argument(dest='instance',help='instance index or name'); parser.add_argument('-f','--force',action='store_true',dest='force',help='instance index or name'); args = vars(parser.parse_args(args)) instanceId = args['instance'] force = args['force'] try: index = int(instanceId) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instanceId = instances[index] except ValueError: pass client = AwsConnectionFactory.getEc2Client() client.stop_instances(InstanceIds=[instanceId['InstanceId']],Force=force)
[ "def", "do_stopInstance", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"stopInstance\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'instance'", ",", "help", "=", "'instance index or name'", ")", "parser", ".", ...
Stop specified instance
[ "Stop", "specified", "instance" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L247-L264
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_terminateInstance
def do_terminateInstance(self,args): """Terminate an EC2 instance""" parser = CommandArgumentParser("terminateInstance") parser.add_argument(dest='instance',help='instance index or name'); args = vars(parser.parse_args(args)) instanceId = args['instance'] try: index = int(instanceId) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instanceId = instances[index] except ValueError: pass client = AwsConnectionFactory.getEc2Client() client.terminate_instances(InstanceIds=[instanceId['InstanceId']]) self.do_printInstances("-r")
python
def do_terminateInstance(self,args): """Terminate an EC2 instance""" parser = CommandArgumentParser("terminateInstance") parser.add_argument(dest='instance',help='instance index or name'); args = vars(parser.parse_args(args)) instanceId = args['instance'] try: index = int(instanceId) instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances'] instanceId = instances[index] except ValueError: pass client = AwsConnectionFactory.getEc2Client() client.terminate_instances(InstanceIds=[instanceId['InstanceId']]) self.do_printInstances("-r")
[ "def", "do_terminateInstance", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"terminateInstance\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'instance'", ",", "help", "=", "'instance index or name'", ")", "args", ...
Terminate an EC2 instance
[ "Terminate", "an", "EC2", "instance" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L266-L282
earlye/nephele
nephele/AwsAutoScalingGroup.py
AwsAutoScalingGroup.do_updateCapacity
def do_updateCapacity(self,args): """Set the desired capacity""" parser = CommandArgumentParser("updateMinMax") parser.add_argument('-m','--min',dest='min',type=int,help='new values'); parser.add_argument('-M','--max',dest='max',type=int,help='new values'); parser.add_argument('-d','--desired',dest='desired',type=int,help='desired'); args = vars(parser.parse_args(args)) minSize = args['min'] maxSize = args['max'] desired = args['desired'] print "Setting desired capacity to {}-{}, {}".format(minSize,maxSize,desired) client = AwsConnectionFactory.getAsgClient() client.update_auto_scaling_group(AutoScalingGroupName=self.scalingGroup,MinSize=minSize,MaxSize=maxSize,DesiredCapacity=desired) #client.set_desired_capacity(AutoScalingGroupName=self.scalingGroup,DesiredCapacity=value,HonorCooldown=True) print "Scaling activity in progress"
python
def do_updateCapacity(self,args): """Set the desired capacity""" parser = CommandArgumentParser("updateMinMax") parser.add_argument('-m','--min',dest='min',type=int,help='new values'); parser.add_argument('-M','--max',dest='max',type=int,help='new values'); parser.add_argument('-d','--desired',dest='desired',type=int,help='desired'); args = vars(parser.parse_args(args)) minSize = args['min'] maxSize = args['max'] desired = args['desired'] print "Setting desired capacity to {}-{}, {}".format(minSize,maxSize,desired) client = AwsConnectionFactory.getAsgClient() client.update_auto_scaling_group(AutoScalingGroupName=self.scalingGroup,MinSize=minSize,MaxSize=maxSize,DesiredCapacity=desired) #client.set_desired_capacity(AutoScalingGroupName=self.scalingGroup,DesiredCapacity=value,HonorCooldown=True) print "Scaling activity in progress"
[ "def", "do_updateCapacity", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"updateMinMax\"", ")", "parser", ".", "add_argument", "(", "'-m'", ",", "'--min'", ",", "dest", "=", "'min'", ",", "type", "=", "int", ",", "help...
Set the desired capacity
[ "Set", "the", "desired", "capacity" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsAutoScalingGroup.py#L284-L300
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
main
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()): """ Run the appropriate functions in order :param sequencepath: path of folder containing FASTA genomes :param report: boolean to determine whether a report is to be created :param refseq_database: Path to reduced refseq database sketch :param num_threads: Number of threads to run mash/other stuff on :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \ n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict """ files = find_files(sequencepath) file_dict = filer(files) printtime('Using MASH to determine genera of samples', start) genus_dict = find_genus(file_dict, refseq_database, threads=num_threads) file_records = fasta_records(file_dict) printtime('Collecting basic quality metrics', start) contig_len_dict, gc_dict = fasta_stats(file_dict, file_records) contig_dist_dict = find_contig_distribution(contig_len_dict) longest_contig_dict = find_largest_contig(contig_len_dict) genome_length_dict = find_genome_length(contig_len_dict) num_contigs_dict = find_num_contigs(contig_len_dict) n50_dict = find_n50(contig_len_dict, genome_length_dict) n75_dict = find_n75(contig_len_dict, genome_length_dict) n90_dict = find_n90(contig_len_dict, genome_length_dict) l50_dict = find_l50(contig_len_dict, genome_length_dict) l75_dict = find_l75(contig_len_dict, genome_length_dict) l90_dict = find_l90(contig_len_dict, genome_length_dict) printtime('Using prodigal to calculate number of ORFs in each sample', start) orf_file_dict = predict_orfs(file_dict, num_threads=num_threads) orf_dist_dict = find_orf_distribution(orf_file_dict) if report: reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath) printtime('Features extracted!', start) return gc_dict, contig_dist_dict, longest_contig_dict, 
genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \ n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
python
def main(sequencepath, report, refseq_database, num_threads=12, start=time.time()): """ Run the appropriate functions in order :param sequencepath: path of folder containing FASTA genomes :param report: boolean to determine whether a report is to be created :param refseq_database: Path to reduced refseq database sketch :param num_threads: Number of threads to run mash/other stuff on :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \ n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict """ files = find_files(sequencepath) file_dict = filer(files) printtime('Using MASH to determine genera of samples', start) genus_dict = find_genus(file_dict, refseq_database, threads=num_threads) file_records = fasta_records(file_dict) printtime('Collecting basic quality metrics', start) contig_len_dict, gc_dict = fasta_stats(file_dict, file_records) contig_dist_dict = find_contig_distribution(contig_len_dict) longest_contig_dict = find_largest_contig(contig_len_dict) genome_length_dict = find_genome_length(contig_len_dict) num_contigs_dict = find_num_contigs(contig_len_dict) n50_dict = find_n50(contig_len_dict, genome_length_dict) n75_dict = find_n75(contig_len_dict, genome_length_dict) n90_dict = find_n90(contig_len_dict, genome_length_dict) l50_dict = find_l50(contig_len_dict, genome_length_dict) l75_dict = find_l75(contig_len_dict, genome_length_dict) l90_dict = find_l90(contig_len_dict, genome_length_dict) printtime('Using prodigal to calculate number of ORFs in each sample', start) orf_file_dict = predict_orfs(file_dict, num_threads=num_threads) orf_dist_dict = find_orf_distribution(orf_file_dict) if report: reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath) printtime('Features extracted!', start) return gc_dict, contig_dist_dict, longest_contig_dict, 
genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \ n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
[ "def", "main", "(", "sequencepath", ",", "report", ",", "refseq_database", ",", "num_threads", "=", "12", ",", "start", "=", "time", ".", "time", "(", ")", ")", ":", "files", "=", "find_files", "(", "sequencepath", ")", "file_dict", "=", "filer", "(", ...
Run the appropriate functions in order :param sequencepath: path of folder containing FASTA genomes :param report: boolean to determine whether a report is to be created :param refseq_database: Path to reduced refseq database sketch :param num_threads: Number of threads to run mash/other stuff on :return: gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, \ n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict
[ "Run", "the", "appropriate", "functions", "in", "order", ":", "param", "sequencepath", ":", "path", "of", "folder", "containing", "FASTA", "genomes", ":", "param", "report", ":", "boolean", "to", "determine", "whether", "a", "report", "is", "to", "be", "crea...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L16-L51
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_files
def find_files(sequencepath): """ Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported :param sequencepath: path of folder containing FASTA genomes :return: list of FASTA files """ # Create a sorted list of all the FASTA files in the sequence path files = sorted(glob(os.path.join(sequencepath, '*.fa*'))) return files
python
def find_files(sequencepath): """ Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported :param sequencepath: path of folder containing FASTA genomes :return: list of FASTA files """ # Create a sorted list of all the FASTA files in the sequence path files = sorted(glob(os.path.join(sequencepath, '*.fa*'))) return files
[ "def", "find_files", "(", "sequencepath", ")", ":", "# Create a sorted list of all the FASTA files in the sequence path", "files", "=", "sorted", "(", "glob", "(", "os", ".", "path", ".", "join", "(", "sequencepath", ",", "'*.fa*'", ")", ")", ")", "return", "files...
Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported :param sequencepath: path of folder containing FASTA genomes :return: list of FASTA files
[ "Use", "glob", "to", "find", "all", "FASTA", "files", "in", "the", "provided", "sequence", "path", ".", "NOTE", ":", "FASTA", "files", "must", "have", "an", "extension", "such", "as", ".", "fasta", ".", "fa", "or", ".", "fas", ".", "Extensions", "of", ...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L54-L63
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
filer
def filer(filelist): """ Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension) :param filelist: list of files to parse :return filedict: dictionary of stain name: /sequencepath/strain_name.extension """ # Initialise the dictionary filedict = dict() for seqfile in filelist: # Split off the file extension and remove the path from the name strainname = os.path.splitext(os.path.basename(seqfile))[0] # Populate the dictionary filedict[strainname] = seqfile return filedict
python
def filer(filelist): """ Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension) :param filelist: list of files to parse :return filedict: dictionary of stain name: /sequencepath/strain_name.extension """ # Initialise the dictionary filedict = dict() for seqfile in filelist: # Split off the file extension and remove the path from the name strainname = os.path.splitext(os.path.basename(seqfile))[0] # Populate the dictionary filedict[strainname] = seqfile return filedict
[ "def", "filer", "(", "filelist", ")", ":", "# Initialise the dictionary", "filedict", "=", "dict", "(", ")", "for", "seqfile", "in", "filelist", ":", "# Split off the file extension and remove the path from the name", "strainname", "=", "os", ".", "path", ".", "splite...
Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension) :param filelist: list of files to parse :return filedict: dictionary of stain name: /sequencepath/strain_name.extension
[ "Helper", "script", "that", "creates", "a", "dictionary", "of", "the", "stain", "name", ":", "/", "sequencepath", "/", "strain_name", ".", "extension", ")", ":", "param", "filelist", ":", "list", "of", "files", "to", "parse", ":", "return", "filedict", ":"...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L66-L79
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
fasta_records
def fasta_records(files): """ Use SeqIO to create dictionaries of all records for each FASTA file :param files: dictionary of stain name: /sequencepath/strain_name.extension :return: file_records: dictionary of all contig records for all strains """ # Initialise the dictionary file_records = dict() for file_name, fasta in files.items(): # Create a dictionary of records for each file record_dict = SeqIO.to_dict(SeqIO.parse(fasta, "fasta")) # Set the records dictionary as the value for file_records file_records[file_name] = record_dict return file_records
python
def fasta_records(files): """ Use SeqIO to create dictionaries of all records for each FASTA file :param files: dictionary of stain name: /sequencepath/strain_name.extension :return: file_records: dictionary of all contig records for all strains """ # Initialise the dictionary file_records = dict() for file_name, fasta in files.items(): # Create a dictionary of records for each file record_dict = SeqIO.to_dict(SeqIO.parse(fasta, "fasta")) # Set the records dictionary as the value for file_records file_records[file_name] = record_dict return file_records
[ "def", "fasta_records", "(", "files", ")", ":", "# Initialise the dictionary", "file_records", "=", "dict", "(", ")", "for", "file_name", ",", "fasta", "in", "files", ".", "items", "(", ")", ":", "# Create a dictionary of records for each file", "record_dict", "=", ...
Use SeqIO to create dictionaries of all records for each FASTA file :param files: dictionary of stain name: /sequencepath/strain_name.extension :return: file_records: dictionary of all contig records for all strains
[ "Use", "SeqIO", "to", "create", "dictionaries", "of", "all", "records", "for", "each", "FASTA", "file", ":", "param", "files", ":", "dictionary", "of", "stain", "name", ":", "/", "sequencepath", "/", "strain_name", ".", "extension", ":", "return", ":", "fi...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L82-L95
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_genus
def find_genus(files, database, threads=12): """ Uses MASH to find the genus of fasta files. :param files: File dictionary returned by filer method. :param database: Path to reduced refseq database sketch. :param threads: Number of threads to run mash with. :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found. """ genus_dict = dict() tmpdir = str(time.time()).split('.')[-1] if not os.path.isdir(tmpdir): os.makedirs(tmpdir) for file_name, fasta in files.items(): mash.screen(database, fasta, threads=threads, w='', i=0.95, output_file=os.path.join(tmpdir, 'screen.tab')) screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab')) try: os.remove(os.path.join(tmpdir, 'screen.tab')) except IOError: pass try: genus = screen_output[0].query_id.split('/')[-3] if genus == 'Shigella': genus = 'Escherichia' genus_dict[file_name] = genus except IndexError: genus_dict[file_name] = 'NA' shutil.rmtree(tmpdir) return genus_dict
python
def find_genus(files, database, threads=12): """ Uses MASH to find the genus of fasta files. :param files: File dictionary returned by filer method. :param database: Path to reduced refseq database sketch. :param threads: Number of threads to run mash with. :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found. """ genus_dict = dict() tmpdir = str(time.time()).split('.')[-1] if not os.path.isdir(tmpdir): os.makedirs(tmpdir) for file_name, fasta in files.items(): mash.screen(database, fasta, threads=threads, w='', i=0.95, output_file=os.path.join(tmpdir, 'screen.tab')) screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab')) try: os.remove(os.path.join(tmpdir, 'screen.tab')) except IOError: pass try: genus = screen_output[0].query_id.split('/')[-3] if genus == 'Shigella': genus = 'Escherichia' genus_dict[file_name] = genus except IndexError: genus_dict[file_name] = 'NA' shutil.rmtree(tmpdir) return genus_dict
[ "def", "find_genus", "(", "files", ",", "database", ",", "threads", "=", "12", ")", ":", "genus_dict", "=", "dict", "(", ")", "tmpdir", "=", "str", "(", "time", ".", "time", "(", ")", ")", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if"...
Uses MASH to find the genus of fasta files. :param files: File dictionary returned by filer method. :param database: Path to reduced refseq database sketch. :param threads: Number of threads to run mash with. :return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found.
[ "Uses", "MASH", "to", "find", "the", "genus", "of", "fasta", "files", ".", ":", "param", "files", ":", "File", "dictionary", "returned", "by", "filer", "method", ".", ":", "param", "database", ":", "Path", "to", "reduced", "refseq", "database", "sketch", ...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L98-L130
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
fasta_stats
def fasta_stats(files, records): """ Parse the lengths of all contigs for each sample, as well as the total GC% :param files: dictionary of stain name: /sequencepath/strain_name.extension :param records: Dictionary of strain name: SeqIO records :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains """ # Initialise dictionaries contig_len_dict = dict() gc_dict = dict() for file_name in files: # Initialise variables to store appropriate values parsed from contig records contig_lengths = list() fasta_sequence = str() for contig, record in records[file_name].items(): # Append the length of the contig to the list contig_lengths.append(len(record.seq)) # Add the contig sequence to the string fasta_sequence += record.seq # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value contig_len_dict[file_name] = sorted(contig_lengths, reverse=True) # Calculate the GC% of the total genome sequence using GC - format to have two decimal places gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence))) return contig_len_dict, gc_dict
python
def fasta_stats(files, records): """ Parse the lengths of all contigs for each sample, as well as the total GC% :param files: dictionary of stain name: /sequencepath/strain_name.extension :param records: Dictionary of strain name: SeqIO records :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains """ # Initialise dictionaries contig_len_dict = dict() gc_dict = dict() for file_name in files: # Initialise variables to store appropriate values parsed from contig records contig_lengths = list() fasta_sequence = str() for contig, record in records[file_name].items(): # Append the length of the contig to the list contig_lengths.append(len(record.seq)) # Add the contig sequence to the string fasta_sequence += record.seq # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value contig_len_dict[file_name] = sorted(contig_lengths, reverse=True) # Calculate the GC% of the total genome sequence using GC - format to have two decimal places gc_dict[file_name] = float('{:0.2f}'.format(GC(fasta_sequence))) return contig_len_dict, gc_dict
[ "def", "fasta_stats", "(", "files", ",", "records", ")", ":", "# Initialise dictionaries", "contig_len_dict", "=", "dict", "(", ")", "gc_dict", "=", "dict", "(", ")", "for", "file_name", "in", "files", ":", "# Initialise variables to store appropriate values parsed fr...
Parse the lengths of all contigs for each sample, as well as the total GC% :param files: dictionary of stain name: /sequencepath/strain_name.extension :param records: Dictionary of strain name: SeqIO records :return: contig_len_dict, gc_dict: dictionaries of list of all contig length, and total GC% for all strains
[ "Parse", "the", "lengths", "of", "all", "contigs", "for", "each", "sample", "as", "well", "as", "the", "total", "GC%", ":", "param", "files", ":", "dictionary", "of", "stain", "name", ":", "/", "sequencepath", "/", "strain_name", ".", "extension", ":", "...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L133-L156
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_contig_distribution
def find_contig_distribution(contig_lengths_dict): """ Determine the frequency of different contig size ranges for each strain :param contig_lengths_dict: :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies """ # Initialise the dictionary contig_len_dist_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Initialise integers to store the number of contigs that fall into the different bin sizes over_1000000 = 0 over_500000 = 0 over_100000 = 0 over_50000 = 0 over_10000 = 0 over_5000 = 0 other = 0 for contig_length in contig_lengths: # Depending on the size of the contig, increment the appropriate integer if contig_length > 1000000: over_1000000 += 1 elif contig_length > 500000: over_500000 += 1 elif contig_length > 100000: over_100000 += 1 elif contig_length > 50000: over_50000 += 1 elif contig_length > 10000: over_10000 += 1 elif contig_length > 5000: over_5000 += 1 else: other += 1 # Populate the dictionary with a tuple of each of the size range frequencies contig_len_dist_dict[file_name] = (over_1000000, over_500000, over_100000, over_50000, over_10000, over_5000, other) return contig_len_dist_dict
python
def find_contig_distribution(contig_lengths_dict): """ Determine the frequency of different contig size ranges for each strain :param contig_lengths_dict: :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies """ # Initialise the dictionary contig_len_dist_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Initialise integers to store the number of contigs that fall into the different bin sizes over_1000000 = 0 over_500000 = 0 over_100000 = 0 over_50000 = 0 over_10000 = 0 over_5000 = 0 other = 0 for contig_length in contig_lengths: # Depending on the size of the contig, increment the appropriate integer if contig_length > 1000000: over_1000000 += 1 elif contig_length > 500000: over_500000 += 1 elif contig_length > 100000: over_100000 += 1 elif contig_length > 50000: over_50000 += 1 elif contig_length > 10000: over_10000 += 1 elif contig_length > 5000: over_5000 += 1 else: other += 1 # Populate the dictionary with a tuple of each of the size range frequencies contig_len_dist_dict[file_name] = (over_1000000, over_500000, over_100000, over_50000, over_10000, over_5000, other) return contig_len_dist_dict
[ "def", "find_contig_distribution", "(", "contig_lengths_dict", ")", ":", "# Initialise the dictionary", "contig_len_dist_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "# Initialise inte...
Determine the frequency of different contig size ranges for each strain :param contig_lengths_dict: :return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
[ "Determine", "the", "frequency", "of", "different", "contig", "size", "ranges", "for", "each", "strain", ":", "param", "contig_lengths_dict", ":", ":", "return", ":", "contig_len_dist_dict", ":", "dictionary", "of", "strain", "name", ":", "tuple", "of", "contig"...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L159-L200
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_largest_contig
def find_largest_contig(contig_lengths_dict): """ Determine the largest contig for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: longest_contig_dict: dictionary of strain name: longest contig """ # Initialise the dictionary longest_contig_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # As the list is sorted in descending order, the largest contig is the first entry in the list longest_contig_dict[file_name] = contig_lengths[0] return longest_contig_dict
python
def find_largest_contig(contig_lengths_dict): """ Determine the largest contig for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: longest_contig_dict: dictionary of strain name: longest contig """ # Initialise the dictionary longest_contig_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # As the list is sorted in descending order, the largest contig is the first entry in the list longest_contig_dict[file_name] = contig_lengths[0] return longest_contig_dict
[ "def", "find_largest_contig", "(", "contig_lengths_dict", ")", ":", "# Initialise the dictionary", "longest_contig_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "# As the list is sorted...
Determine the largest contig for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: longest_contig_dict: dictionary of strain name: longest contig
[ "Determine", "the", "largest", "contig", "for", "each", "strain", ":", "param", "contig_lengths_dict", ":", "dictionary", "of", "strain", "name", ":", "reverse", "-", "sorted", "list", "of", "all", "contig", "lengths", ":", "return", ":", "longest_contig_dict", ...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L203-L214
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_genome_length
def find_genome_length(contig_lengths_dict): """ Determine the total length of all the contigs for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: genome_length_dict: dictionary of strain name: total genome length """ # Initialise the dictionary genome_length_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Use the sum() method to add all the contig lengths in the list genome_length_dict[file_name] = sum(contig_lengths) return genome_length_dict
python
def find_genome_length(contig_lengths_dict): """ Determine the total length of all the contigs for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: genome_length_dict: dictionary of strain name: total genome length """ # Initialise the dictionary genome_length_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Use the sum() method to add all the contig lengths in the list genome_length_dict[file_name] = sum(contig_lengths) return genome_length_dict
[ "def", "find_genome_length", "(", "contig_lengths_dict", ")", ":", "# Initialise the dictionary", "genome_length_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "# Use the sum() method to...
Determine the total length of all the contigs for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: genome_length_dict: dictionary of strain name: total genome length
[ "Determine", "the", "total", "length", "of", "all", "the", "contigs", "for", "each", "strain", ":", "param", "contig_lengths_dict", ":", "dictionary", "of", "strain", "name", ":", "reverse", "-", "sorted", "list", "of", "all", "contig", "lengths", ":", "retu...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L217-L228
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_num_contigs
def find_num_contigs(contig_lengths_dict): """ Count the total number of contigs for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: num_contigs_dict: dictionary of strain name: total number of contigs """ # Initialise the dictionary num_contigs_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Use the len() method to count the number of entries in the list num_contigs_dict[file_name] = len(contig_lengths) return num_contigs_dict
python
def find_num_contigs(contig_lengths_dict): """ Count the total number of contigs for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: num_contigs_dict: dictionary of strain name: total number of contigs """ # Initialise the dictionary num_contigs_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Use the len() method to count the number of entries in the list num_contigs_dict[file_name] = len(contig_lengths) return num_contigs_dict
[ "def", "find_num_contigs", "(", "contig_lengths_dict", ")", ":", "# Initialise the dictionary", "num_contigs_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "# Use the len() method to cou...
Count the total number of contigs for each strain :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :return: num_contigs_dict: dictionary of strain name: total number of contigs
[ "Count", "the", "total", "number", "of", "contigs", "for", "each", "strain", ":", "param", "contig_lengths_dict", ":", "dictionary", "of", "strain", "name", ":", "reverse", "-", "sorted", "list", "of", "all", "contig", "lengths", ":", "return", ":", "num_con...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L231-L242
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_n50
def find_n50(contig_lengths_dict, genome_length_dict): """ Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n50_dict: dictionary of strain name: N50 """ # Initialise the dictionary n50_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Initialise a variable to store a running total of contig lengths currentlength = 0 for contig_length in contig_lengths: # Increment the current length with the length of the current contig currentlength += contig_length # If the current length is now greater than the total genome / 2, the current contig length is the N50 if currentlength >= genome_length_dict[file_name] * 0.5: # Populate the dictionary, and break the loop n50_dict[file_name] = contig_length break return n50_dict
python
def find_n50(contig_lengths_dict, genome_length_dict): """ Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n50_dict: dictionary of strain name: N50 """ # Initialise the dictionary n50_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Initialise a variable to store a running total of contig lengths currentlength = 0 for contig_length in contig_lengths: # Increment the current length with the length of the current contig currentlength += contig_length # If the current length is now greater than the total genome / 2, the current contig length is the N50 if currentlength >= genome_length_dict[file_name] * 0.5: # Populate the dictionary, and break the loop n50_dict[file_name] = contig_length break return n50_dict
[ "def", "find_n50", "(", "contig_lengths_dict", ",", "genome_length_dict", ")", ":", "# Initialise the dictionary", "n50_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "# Initialise a...
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n50_dict: dictionary of strain name: N50
[ "Calculate", "the", "N50", "for", "each", "strain", ".", "N50", "is", "defined", "as", "the", "largest", "contig", "such", "that", "at", "least", "half", "of", "the", "total", "genome", "size", "is", "contained", "in", "contigs", "equal", "to", "or", "la...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L245-L266
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_n75
def find_n75(contig_lengths_dict, genome_length_dict): """ Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n75_dict: dictionary of strain name: N75 """ # Initialise the dictionary n75_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 for contig_length in contig_lengths: currentlength += contig_length # If the current length is now greater than the 3/4 of the total genome length, the current contig length # is the N75 if currentlength >= genome_length_dict[file_name] * 0.75: n75_dict[file_name] = contig_length break return n75_dict
python
def find_n75(contig_lengths_dict, genome_length_dict): """ Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n75_dict: dictionary of strain name: N75 """ # Initialise the dictionary n75_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 for contig_length in contig_lengths: currentlength += contig_length # If the current length is now greater than the 3/4 of the total genome length, the current contig length # is the N75 if currentlength >= genome_length_dict[file_name] * 0.75: n75_dict[file_name] = contig_length break return n75_dict
[ "def", "find_n75", "(", "contig_lengths_dict", ",", "genome_length_dict", ")", ":", "# Initialise the dictionary", "n75_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "currentlength"...
Calculate the N75 for each strain. N75 is defined as the largest contig such that at least 3/4 of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n75_dict: dictionary of strain name: N75
[ "Calculate", "the", "N75", "for", "each", "strain", ".", "N75", "is", "defined", "as", "the", "largest", "contig", "such", "that", "at", "least", "3", "/", "4", "of", "the", "total", "genome", "size", "is", "contained", "in", "contigs", "equal", "to", ...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L269-L288
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_n90
def find_n90(contig_lengths_dict, genome_length_dict): """ Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n75_dict: dictionary of strain name: N90 """ # Initialise the dictionary n90_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 for contig_length in contig_lengths: currentlength += contig_length # If the current length is now greater than the 3/4 of the total genome length, the current contig length # is the N75 if currentlength >= genome_length_dict[file_name] * 0.95: n90_dict[file_name] = contig_length break return n90_dict
python
def find_n90(contig_lengths_dict, genome_length_dict): """ Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n75_dict: dictionary of strain name: N90 """ # Initialise the dictionary n90_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 for contig_length in contig_lengths: currentlength += contig_length # If the current length is now greater than the 3/4 of the total genome length, the current contig length # is the N75 if currentlength >= genome_length_dict[file_name] * 0.95: n90_dict[file_name] = contig_length break return n90_dict
[ "def", "find_n90", "(", "contig_lengths_dict", ",", "genome_length_dict", ")", ":", "# Initialise the dictionary", "n90_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "currentlength"...
Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n75_dict: dictionary of strain name: N90
[ "Calculate", "the", "N90", "for", "each", "strain", ".", "N90", "is", "defined", "as", "the", "largest", "contig", "such", "that", "at", "least", "9", "/", "10", "of", "the", "total", "genome", "size", "is", "contained", "in", "contigs", "equal", "to", ...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L291-L310
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_l50
def find_l50(contig_lengths_dict, genome_length_dict): """ Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l50_dict: dictionary of strain name: L50 """ # Initialise the dictionary l50_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 # Initialise a variable to count how many contigs have been added to the currentlength variable currentcontig = 0 for contig_length in contig_lengths: currentlength += contig_length # Increment :currentcontig each time a contig is added to the current length currentcontig += 1 # Same logic as with the N50, but the contig number is added instead of the length of the contig if currentlength >= genome_length_dict[file_name] * 0.5: l50_dict[file_name] = currentcontig break return l50_dict
python
def find_l50(contig_lengths_dict, genome_length_dict): """ Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l50_dict: dictionary of strain name: L50 """ # Initialise the dictionary l50_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 # Initialise a variable to count how many contigs have been added to the currentlength variable currentcontig = 0 for contig_length in contig_lengths: currentlength += contig_length # Increment :currentcontig each time a contig is added to the current length currentcontig += 1 # Same logic as with the N50, but the contig number is added instead of the length of the contig if currentlength >= genome_length_dict[file_name] * 0.5: l50_dict[file_name] = currentcontig break return l50_dict
[ "def", "find_l50", "(", "contig_lengths_dict", ",", "genome_length_dict", ")", ":", "# Initialise the dictionary", "l50_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "currentlength"...
Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l50_dict: dictionary of strain name: L50
[ "Calculate", "the", "L50", "for", "each", "strain", ".", "L50", "is", "defined", "as", "the", "number", "of", "contigs", "required", "to", "achieve", "the", "N50", ":", "param", "contig_lengths_dict", ":", "dictionary", "of", "strain", "name", ":", "reverse"...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L313-L334
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_l75
def find_l75(contig_lengths_dict, genome_length_dict): """ Calculate the L50 for each strain. L75 is defined as the number of contigs required to achieve the N75 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l50_dict: dictionary of strain name: L75 """ # Initialise the dictionary l75_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 currentcontig = 0 for contig_length in contig_lengths: currentlength += contig_length currentcontig += 1 # Same logic as with the L75, but the contig number is added instead of the length of the contig if currentlength >= genome_length_dict[file_name] * 0.75: l75_dict[file_name] = currentcontig break return l75_dict
python
def find_l75(contig_lengths_dict, genome_length_dict): """ Calculate the L50 for each strain. L75 is defined as the number of contigs required to achieve the N75 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l50_dict: dictionary of strain name: L75 """ # Initialise the dictionary l75_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 currentcontig = 0 for contig_length in contig_lengths: currentlength += contig_length currentcontig += 1 # Same logic as with the L75, but the contig number is added instead of the length of the contig if currentlength >= genome_length_dict[file_name] * 0.75: l75_dict[file_name] = currentcontig break return l75_dict
[ "def", "find_l75", "(", "contig_lengths_dict", ",", "genome_length_dict", ")", ":", "# Initialise the dictionary", "l75_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "currentlength"...
Calculate the L50 for each strain. L75 is defined as the number of contigs required to achieve the N75 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l50_dict: dictionary of strain name: L75
[ "Calculate", "the", "L50", "for", "each", "strain", ".", "L75", "is", "defined", "as", "the", "number", "of", "contigs", "required", "to", "achieve", "the", "N75", ":", "param", "contig_lengths_dict", ":", "dictionary", "of", "strain", "name", ":", "reverse"...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L337-L356
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_l90
def find_l90(contig_lengths_dict, genome_length_dict): """ Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l90_dict: dictionary of strain name: L90 """ # Initialise the dictionary l90_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 # Initialise a variable to count how many contigs have been added to the currentlength variable currentcontig = 0 for contig_length in contig_lengths: currentlength += contig_length # Increment :currentcontig each time a contig is added to the current length currentcontig += 1 # Same logic as with the N50, but the contig number is added instead of the length of the contig if currentlength >= genome_length_dict[file_name] * 0.9: l90_dict[file_name] = currentcontig break return l90_dict
python
def find_l90(contig_lengths_dict, genome_length_dict): """ Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l90_dict: dictionary of strain name: L90 """ # Initialise the dictionary l90_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 # Initialise a variable to count how many contigs have been added to the currentlength variable currentcontig = 0 for contig_length in contig_lengths: currentlength += contig_length # Increment :currentcontig each time a contig is added to the current length currentcontig += 1 # Same logic as with the N50, but the contig number is added instead of the length of the contig if currentlength >= genome_length_dict[file_name] * 0.9: l90_dict[file_name] = currentcontig break return l90_dict
[ "def", "find_l90", "(", "contig_lengths_dict", ",", "genome_length_dict", ")", ":", "# Initialise the dictionary", "l90_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "currentlength"...
Calculate the L90 for each strain. L90 is defined as the number of contigs required to achieve the N90 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l90_dict: dictionary of strain name: L90
[ "Calculate", "the", "L90", "for", "each", "strain", ".", "L90", "is", "defined", "as", "the", "number", "of", "contigs", "required", "to", "achieve", "the", "N90", ":", "param", "contig_lengths_dict", ":", "dictionary", "of", "strain", "name", ":", "reverse"...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L359-L380
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
predict_orfs
def predict_orfs(file_dict, num_threads=1): """ Use prodigal to predict the number of open reading frames (ORFs) in each strain :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension :param num_threads: number of threads to use in the pool of prodigal processes :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco """ # Initialise the dictionary orf_file_dict = dict() prodigallist = list() for file_name, file_path in file_dict.items(): # Set the name of the output .sco results file results = os.path.splitext(file_path)[0] + '.sco' # Create the command for prodigal to execute - use sco output format prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco'] # Only run prodigal if the output file doesn't already exist if not os.path.isfile(results): prodigallist.append(prodigal) # Populate the dictionary with the name of the results file orf_file_dict[file_name] = results # Setup the multiprocessing pool. pool = multiprocessing.Pool(processes=num_threads) pool.map(run_prodigal, prodigallist) pool.close() pool.join() return orf_file_dict
python
def predict_orfs(file_dict, num_threads=1): """ Use prodigal to predict the number of open reading frames (ORFs) in each strain :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension :param num_threads: number of threads to use in the pool of prodigal processes :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco """ # Initialise the dictionary orf_file_dict = dict() prodigallist = list() for file_name, file_path in file_dict.items(): # Set the name of the output .sco results file results = os.path.splitext(file_path)[0] + '.sco' # Create the command for prodigal to execute - use sco output format prodigal = ['prodigal', '-i', file_path, '-o', results, '-f', 'sco'] # Only run prodigal if the output file doesn't already exist if not os.path.isfile(results): prodigallist.append(prodigal) # Populate the dictionary with the name of the results file orf_file_dict[file_name] = results # Setup the multiprocessing pool. pool = multiprocessing.Pool(processes=num_threads) pool.map(run_prodigal, prodigallist) pool.close() pool.join() return orf_file_dict
[ "def", "predict_orfs", "(", "file_dict", ",", "num_threads", "=", "1", ")", ":", "# Initialise the dictionary", "orf_file_dict", "=", "dict", "(", ")", "prodigallist", "=", "list", "(", ")", "for", "file_name", ",", "file_path", "in", "file_dict", ".", "items"...
Use prodigal to predict the number of open reading frames (ORFs) in each strain :param file_dict: dictionary of strain name: /sequencepath/strain_name.extension :param num_threads: number of threads to use in the pool of prodigal processes :return: orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco
[ "Use", "prodigal", "to", "predict", "the", "number", "of", "open", "reading", "frames", "(", "ORFs", ")", "in", "each", "strain", ":", "param", "file_dict", ":", "dictionary", "of", "strain", "name", ":", "/", "sequencepath", "/", "strain_name", ".", "exte...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L383-L408
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
find_orf_distribution
def find_orf_distribution(orf_file_dict): """ Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies """ # Initialise the dictionary orf_dist_dict = dict() for file_name, orf_report in orf_file_dict.items(): # Initialise variable to store the frequency of the different ORF size ranges total_orfs = 0 over_3000 = 0 over_1000 = 0 over_500 = 0 other = 0 # Open the strain-specific report with open(orf_report, 'r') as orfreport: for line in orfreport: # The report has a header section that can be ignored - only parse lines beginning with '>' if line.startswith('>'): # Split the line on '_' characters e.g. >1_345_920_- yields contig: >1, start: 345, stop: 920, # direction: - contig, start, stop, direction = line.split('_') # The size of the ORF is the end position minus the start position e.g. 920 - 345 = 575 size = int(stop) - int(start) # Increment the total number of ORFs before binning based on ORF size total_orfs += 1 # Increment the appropriate integer based on ORF size if size > 3000: over_3000 += 1 elif size > 1000: over_1000 += 1 elif size > 500: over_500 += 1 else: other += 1 # Populate the dictionary with a tuple of the ORF size range frequencies orf_dist_dict[file_name] = (total_orfs, over_3000, over_1000, over_500, other) # Clean-up the prodigal reports try: os.remove(orf_report) except IOError: pass return orf_dist_dict
python
def find_orf_distribution(orf_file_dict): """ Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies """ # Initialise the dictionary orf_dist_dict = dict() for file_name, orf_report in orf_file_dict.items(): # Initialise variable to store the frequency of the different ORF size ranges total_orfs = 0 over_3000 = 0 over_1000 = 0 over_500 = 0 other = 0 # Open the strain-specific report with open(orf_report, 'r') as orfreport: for line in orfreport: # The report has a header section that can be ignored - only parse lines beginning with '>' if line.startswith('>'): # Split the line on '_' characters e.g. >1_345_920_- yields contig: >1, start: 345, stop: 920, # direction: - contig, start, stop, direction = line.split('_') # The size of the ORF is the end position minus the start position e.g. 920 - 345 = 575 size = int(stop) - int(start) # Increment the total number of ORFs before binning based on ORF size total_orfs += 1 # Increment the appropriate integer based on ORF size if size > 3000: over_3000 += 1 elif size > 1000: over_1000 += 1 elif size > 500: over_500 += 1 else: other += 1 # Populate the dictionary with a tuple of the ORF size range frequencies orf_dist_dict[file_name] = (total_orfs, over_3000, over_1000, over_500, other) # Clean-up the prodigal reports try: os.remove(orf_report) except IOError: pass return orf_dist_dict
[ "def", "find_orf_distribution", "(", "orf_file_dict", ")", ":", "# Initialise the dictionary", "orf_dist_dict", "=", "dict", "(", ")", "for", "file_name", ",", "orf_report", "in", "orf_file_dict", ".", "items", "(", ")", ":", "# Initialise variable to store the frequenc...
Parse the prodigal outputs to determine the frequency of ORF size ranges for each strain :param orf_file_dict: dictionary of strain name: /sequencepath/prodigal results.sco :return: orf_dist_dict: dictionary of strain name: tuple of ORF size range distribution frequencies
[ "Parse", "the", "prodigal", "outputs", "to", "determine", "the", "frequency", "of", "ORF", "size", "ranges", "for", "each", "strain", ":", "param", "orf_file_dict", ":", "dictionary", "of", "strain", "name", ":", "/", "sequencepath", "/", "prodigal", "results"...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L416-L463
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
reporter
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath): """ Create a report of all the extracted features :param gc_dict: dictionary of strain name: GC% :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies :param longest_contig_dict: dictionary of strain name: longest contig :param genome_length_dict: dictionary of strain name: total genome length :param num_contigs_dict: dictionary of strain name: total number of contigs :param n50_dict: dictionary of strain name: N50 :param n75_dict: dictionary of strain name: N75 :param n90_dict: dictionary of strain name: N90 :param l50_dict: dictionary of strain name: L50 :param l75_dict: dictionary of strain name: L75 :param l90_dict: dictionary of strain name: L90 :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies :param genus_dict: dictionary of strain name: genus :param sequencepath: path of folder containing FASTA genomes """ # Initialise string with header information data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \ 'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \ 'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n' # Create and open the report for writign with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report: for file_name in sorted(longest_contig_dict): # Populate the data string with the appropriate values data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \ '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52}, {ORF11},{n50},{n75},{n90},' \ '{l50},{l75},{l90},{gc},{genus}\n'\ .format(name=file_name, totlen=genome_length_dict[file_name], numcontigs=num_contigs_dict[file_name], longestcontig=longest_contig_dict[file_name], 
over_106=contig_dist_dict[file_name][0], over_56=contig_dist_dict[file_name][1], over_105=contig_dist_dict[file_name][2], over_55=contig_dist_dict[file_name][3], over_104=contig_dist_dict[file_name][4], over_54=contig_dist_dict[file_name][5], under_54=contig_dist_dict[file_name][6], tORFS=orf_dist_dict[file_name][0], ORF33=orf_dist_dict[file_name][1], ORF13=orf_dist_dict[file_name][2], ORF52=orf_dist_dict[file_name][3], ORF11=orf_dist_dict[file_name][4], n50=n50_dict[file_name], n75=n75_dict[file_name], n90=n90_dict[file_name], l50=l50_dict[file_name], l75=l75_dict[file_name], l90=l90_dict[file_name], gc=gc_dict[file_name], genus=genus_dict[file_name]) # Write the string to file feature_report.write(data)
python
def reporter(gc_dict, contig_dist_dict, longest_contig_dict, genome_length_dict, num_contigs_dict, n50_dict, n75_dict, n90_dict, l50_dict, l75_dict, l90_dict, orf_dist_dict, genus_dict, sequencepath): """ Create a report of all the extracted features :param gc_dict: dictionary of strain name: GC% :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies :param longest_contig_dict: dictionary of strain name: longest contig :param genome_length_dict: dictionary of strain name: total genome length :param num_contigs_dict: dictionary of strain name: total number of contigs :param n50_dict: dictionary of strain name: N50 :param n75_dict: dictionary of strain name: N75 :param n90_dict: dictionary of strain name: N90 :param l50_dict: dictionary of strain name: L50 :param l75_dict: dictionary of strain name: L75 :param l90_dict: dictionary of strain name: L90 :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies :param genus_dict: dictionary of strain name: genus :param sequencepath: path of folder containing FASTA genomes """ # Initialise string with header information data = 'SampleName,TotalLength,NumContigs,LongestContig,Contigs>1000000,Contigs>500000,Contigs>100000,' \ 'Contigs>50000,Contigs>10000,Contigs>5000,Contigs<5000,TotalORFs,ORFs>3000,ORFs>1000,ORFs>500,' \ 'ORFs<500,N50,N75,N90,L50,L75,L90,GC%,Genus\n' # Create and open the report for writign with open(os.path.join(sequencepath, 'extracted_features.csv'), 'w') as feature_report: for file_name in sorted(longest_contig_dict): # Populate the data string with the appropriate values data += '{name},{totlen},{numcontigs},{longestcontig},{over_106},{over_56},{over_105},{over_55},' \ '{over_104},{over_54},{under_54},{tORFS},{ORF33},{ORF13},{ORF52}, {ORF11},{n50},{n75},{n90},' \ '{l50},{l75},{l90},{gc},{genus}\n'\ .format(name=file_name, totlen=genome_length_dict[file_name], numcontigs=num_contigs_dict[file_name], longestcontig=longest_contig_dict[file_name], 
over_106=contig_dist_dict[file_name][0], over_56=contig_dist_dict[file_name][1], over_105=contig_dist_dict[file_name][2], over_55=contig_dist_dict[file_name][3], over_104=contig_dist_dict[file_name][4], over_54=contig_dist_dict[file_name][5], under_54=contig_dist_dict[file_name][6], tORFS=orf_dist_dict[file_name][0], ORF33=orf_dist_dict[file_name][1], ORF13=orf_dist_dict[file_name][2], ORF52=orf_dist_dict[file_name][3], ORF11=orf_dist_dict[file_name][4], n50=n50_dict[file_name], n75=n75_dict[file_name], n90=n90_dict[file_name], l50=l50_dict[file_name], l75=l75_dict[file_name], l90=l90_dict[file_name], gc=gc_dict[file_name], genus=genus_dict[file_name]) # Write the string to file feature_report.write(data)
[ "def", "reporter", "(", "gc_dict", ",", "contig_dist_dict", ",", "longest_contig_dict", ",", "genome_length_dict", ",", "num_contigs_dict", ",", "n50_dict", ",", "n75_dict", ",", "n90_dict", ",", "l50_dict", ",", "l75_dict", ",", "l90_dict", ",", "orf_dist_dict", ...
Create a report of all the extracted features :param gc_dict: dictionary of strain name: GC% :param contig_dist_dict: dictionary of strain: tuple of contig distribution frequencies :param longest_contig_dict: dictionary of strain name: longest contig :param genome_length_dict: dictionary of strain name: total genome length :param num_contigs_dict: dictionary of strain name: total number of contigs :param n50_dict: dictionary of strain name: N50 :param n75_dict: dictionary of strain name: N75 :param n90_dict: dictionary of strain name: N90 :param l50_dict: dictionary of strain name: L50 :param l75_dict: dictionary of strain name: L75 :param l90_dict: dictionary of strain name: L90 :param orf_dist_dict: dictionary of strain name: tuple of ORF length frequencies :param genus_dict: dictionary of strain name: genus :param sequencepath: path of folder containing FASTA genomes
[ "Create", "a", "report", "of", "all", "the", "extracted", "features", ":", "param", "gc_dict", ":", "dictionary", "of", "strain", "name", ":", "GC%", ":", "param", "contig_dist_dict", ":", "dictionary", "of", "strain", ":", "tuple", "of", "contig", "distribu...
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L466-L521
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
cli
def cli(sequencepath, report, refseq_database): """ Pass command line arguments to, and run the feature extraction functions """ main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
python
def cli(sequencepath, report, refseq_database): """ Pass command line arguments to, and run the feature extraction functions """ main(sequencepath, report, refseq_database, num_threads=multiprocessing.cpu_count())
[ "def", "cli", "(", "sequencepath", ",", "report", ",", "refseq_database", ")", ":", "main", "(", "sequencepath", ",", "report", ",", "refseq_database", ",", "num_threads", "=", "multiprocessing", ".", "cpu_count", "(", ")", ")" ]
Pass command line arguments to, and run the feature extraction functions
[ "Pass", "command", "line", "arguments", "to", "and", "run", "the", "feature", "extraction", "functions" ]
train
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L539-L543
kmedian/grouplabelencode
grouplabelencode/grouplabelencode_func.py
grouplabelencode
def grouplabelencode(data, mapping, nacode=None, nastate=False): """Encode data array with grouped labels Parameters: ----------- data : list array with labels mapping : dict, list of list the index of each element is used as encoding. Each element is a single label (str) or list of labels that are mapped to the encoding. nacode : integer (Default: None) Encoding for unmapped states. nastate : bool If False (Default) unmatched data labels are encoded as nacode. If nastate=True (and nacode=None) then unmatched data labels are encoded with the integer nacode=len(mapping). """ # What value is used for missing data? if nastate: if nacode is None: nacode = len(mapping) # Process depending on the data type of the data mapping variable if isinstance(mapping, list): m = mapping e = range(len(mapping)) elif isinstance(mapping, dict): m = list(mapping.values()) e = list(mapping.keys()) else: raise Exception("'data' must be list-of-list or dict.") # Loop over 'data' array return grouplabelencode_loop(data, m, e, nacode=nacode)
python
def grouplabelencode(data, mapping, nacode=None, nastate=False): """Encode data array with grouped labels Parameters: ----------- data : list array with labels mapping : dict, list of list the index of each element is used as encoding. Each element is a single label (str) or list of labels that are mapped to the encoding. nacode : integer (Default: None) Encoding for unmapped states. nastate : bool If False (Default) unmatched data labels are encoded as nacode. If nastate=True (and nacode=None) then unmatched data labels are encoded with the integer nacode=len(mapping). """ # What value is used for missing data? if nastate: if nacode is None: nacode = len(mapping) # Process depending on the data type of the data mapping variable if isinstance(mapping, list): m = mapping e = range(len(mapping)) elif isinstance(mapping, dict): m = list(mapping.values()) e = list(mapping.keys()) else: raise Exception("'data' must be list-of-list or dict.") # Loop over 'data' array return grouplabelencode_loop(data, m, e, nacode=nacode)
[ "def", "grouplabelencode", "(", "data", ",", "mapping", ",", "nacode", "=", "None", ",", "nastate", "=", "False", ")", ":", "# What value is used for missing data?", "if", "nastate", ":", "if", "nacode", "is", "None", ":", "nacode", "=", "len", "(", "mapping...
Encode data array with grouped labels Parameters: ----------- data : list array with labels mapping : dict, list of list the index of each element is used as encoding. Each element is a single label (str) or list of labels that are mapped to the encoding. nacode : integer (Default: None) Encoding for unmapped states. nastate : bool If False (Default) unmatched data labels are encoded as nacode. If nastate=True (and nacode=None) then unmatched data labels are encoded with the integer nacode=len(mapping).
[ "Encode", "data", "array", "with", "grouped", "labels" ]
train
https://github.com/kmedian/grouplabelencode/blob/2494b3d6d6863a00c08d74a1cf96384a10305f00/grouplabelencode/grouplabelencode_func.py#L27-L64
Datary/scrapbag
scrapbag/csvs.py
get_csv_col_headers
def get_csv_col_headers(rows, row_headers_count_value=0): """ Retrieve csv column headers """ count = 0 if rows: for row in rows: if exclude_empty_values(row[:row_headers_count_value]): break count += 1 if len(rows) == count: count = 1 # by default return [r[row_headers_count_value:] for r in rows[:count]]
python
def get_csv_col_headers(rows, row_headers_count_value=0): """ Retrieve csv column headers """ count = 0 if rows: for row in rows: if exclude_empty_values(row[:row_headers_count_value]): break count += 1 if len(rows) == count: count = 1 # by default return [r[row_headers_count_value:] for r in rows[:count]]
[ "def", "get_csv_col_headers", "(", "rows", ",", "row_headers_count_value", "=", "0", ")", ":", "count", "=", "0", "if", "rows", ":", "for", "row", "in", "rows", ":", "if", "exclude_empty_values", "(", "row", "[", ":", "row_headers_count_value", "]", ")", "...
Retrieve csv column headers
[ "Retrieve", "csv", "column", "headers" ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L25-L40
Datary/scrapbag
scrapbag/csvs.py
populate_csv_headers
def populate_csv_headers(rows, partial_headers, column_headers_count=1): """ Populate csv rows headers when are empty, extending the superior or upper headers. """ result = [''] * (len(rows) - column_headers_count) for i_index in range(0, len(partial_headers)): for k_index in range(0, len(partial_headers[i_index])): # missing field find for a value in upper rows if not partial_headers[i_index][k_index] and i_index - 1 >= 0: # TODO: It's necesary a for or only taking the # inmediate latest row works well?? for t_index in range(i_index - 1, -1, -1): # TODO: could suposse that allways a value exists partial_value = partial_headers[t_index][k_index] if partial_value: partial_headers[i_index][k_index] = partial_value break result[i_index] = " ".join(map(str, partial_headers[i_index])) return result
python
def populate_csv_headers(rows, partial_headers, column_headers_count=1): """ Populate csv rows headers when are empty, extending the superior or upper headers. """ result = [''] * (len(rows) - column_headers_count) for i_index in range(0, len(partial_headers)): for k_index in range(0, len(partial_headers[i_index])): # missing field find for a value in upper rows if not partial_headers[i_index][k_index] and i_index - 1 >= 0: # TODO: It's necesary a for or only taking the # inmediate latest row works well?? for t_index in range(i_index - 1, -1, -1): # TODO: could suposse that allways a value exists partial_value = partial_headers[t_index][k_index] if partial_value: partial_headers[i_index][k_index] = partial_value break result[i_index] = " ".join(map(str, partial_headers[i_index])) return result
[ "def", "populate_csv_headers", "(", "rows", ",", "partial_headers", ",", "column_headers_count", "=", "1", ")", ":", "result", "=", "[", "''", "]", "*", "(", "len", "(", "rows", ")", "-", "column_headers_count", ")", "for", "i_index", "in", "range", "(", ...
Populate csv rows headers when are empty, extending the superior or upper headers.
[ "Populate", "csv", "rows", "headers", "when", "are", "empty", "extending", "the", "superior", "or", "upper", "headers", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L58-L85
Datary/scrapbag
scrapbag/csvs.py
get_row_headers
def get_row_headers(rows, row_headers_count_value=0, column_headers_count=1): """ Return row headers. Assume that by default it has one column header. Assume that there is only one father row header. """ # TODO: REFACTOR ALGORITHM NEEDED partial_headers = [] if row_headers_count_value: # Take partial data for k_index in range(0, len(rows) - column_headers_count): header = rows[k_index + column_headers_count][ :row_headers_count_value] partial_headers.append(remove_list_duplicates(force_list(header))) # Populate headers populated_headers = populate_csv_headers( rows, partial_headers, column_headers_count) return populated_headers
python
def get_row_headers(rows, row_headers_count_value=0, column_headers_count=1): """ Return row headers. Assume that by default it has one column header. Assume that there is only one father row header. """ # TODO: REFACTOR ALGORITHM NEEDED partial_headers = [] if row_headers_count_value: # Take partial data for k_index in range(0, len(rows) - column_headers_count): header = rows[k_index + column_headers_count][ :row_headers_count_value] partial_headers.append(remove_list_duplicates(force_list(header))) # Populate headers populated_headers = populate_csv_headers( rows, partial_headers, column_headers_count) return populated_headers
[ "def", "get_row_headers", "(", "rows", ",", "row_headers_count_value", "=", "0", ",", "column_headers_count", "=", "1", ")", ":", "# TODO: REFACTOR ALGORITHM NEEDED", "partial_headers", "=", "[", "]", "if", "row_headers_count_value", ":", "# Take partial data", "for", ...
Return row headers. Assume that by default it has one column header. Assume that there is only one father row header.
[ "Return", "row", "headers", ".", "Assume", "that", "by", "default", "it", "has", "one", "column", "header", ".", "Assume", "that", "there", "is", "only", "one", "father", "row", "header", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L88-L111
Datary/scrapbag
scrapbag/csvs.py
retrieve_csv_data
def retrieve_csv_data(rows, row_header=0, column_header=0, limit_column=0): """ Take the data from the rows. """ return [row[row_header:limit_column] for row in rows[column_header:]]
python
def retrieve_csv_data(rows, row_header=0, column_header=0, limit_column=0): """ Take the data from the rows. """ return [row[row_header:limit_column] for row in rows[column_header:]]
[ "def", "retrieve_csv_data", "(", "rows", ",", "row_header", "=", "0", ",", "column_header", "=", "0", ",", "limit_column", "=", "0", ")", ":", "return", "[", "row", "[", "row_header", ":", "limit_column", "]", "for", "row", "in", "rows", "[", "column_hea...
Take the data from the rows.
[ "Take", "the", "data", "from", "the", "rows", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L114-L118
Datary/scrapbag
scrapbag/csvs.py
csv_tolist
def csv_tolist(path_to_file, **kwargs): """ Parse the csv file to a list of rows. """ result = [] encoding = kwargs.get('encoding', 'utf-8') delimiter = kwargs.get('delimiter', ',') dialect = kwargs.get('dialect', csv.excel) _, _ext = path_to_file.split('.', 1) try: file = codecs.open(path_to_file, 'r', encoding) items_file = io.TextIOWrapper(file, encoding=encoding) result = list( csv.reader(items_file, delimiter=delimiter, dialect=dialect)) items_file.close() file.close() except Exception as ex: result = [] logger.error('Fail parsing csv to list of rows - {}'.format(ex)) return result
python
def csv_tolist(path_to_file, **kwargs): """ Parse the csv file to a list of rows. """ result = [] encoding = kwargs.get('encoding', 'utf-8') delimiter = kwargs.get('delimiter', ',') dialect = kwargs.get('dialect', csv.excel) _, _ext = path_to_file.split('.', 1) try: file = codecs.open(path_to_file, 'r', encoding) items_file = io.TextIOWrapper(file, encoding=encoding) result = list( csv.reader(items_file, delimiter=delimiter, dialect=dialect)) items_file.close() file.close() except Exception as ex: result = [] logger.error('Fail parsing csv to list of rows - {}'.format(ex)) return result
[ "def", "csv_tolist", "(", "path_to_file", ",", "*", "*", "kwargs", ")", ":", "result", "=", "[", "]", "encoding", "=", "kwargs", ".", "get", "(", "'encoding'", ",", "'utf-8'", ")", "delimiter", "=", "kwargs", ".", "get", "(", "'delimiter'", ",", "','",...
Parse the csv file to a list of rows.
[ "Parse", "the", "csv", "file", "to", "a", "list", "of", "rows", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L121-L148
Datary/scrapbag
scrapbag/csvs.py
excel_todictlist
def excel_todictlist(path_to_file, **kwargs): """ Parse excel file to a dict list of sheets, rows. """ result = collections.OrderedDict() encoding = kwargs.get('encoding', 'utf-8') formatting_info = '.xlsx' not in path_to_file count = 0 with xlrd.open_workbook( path_to_file, encoding_override=encoding, formatting_info=formatting_info) \ as _excelfile: for sheet_name_raw in _excelfile.sheet_names(): # if empty sheet name put sheet# as name sheet_name = sheet_name_raw or "sheet{}".format(count) result[sheet_name] = [] xl_sheet = _excelfile.sheet_by_name(sheet_name_raw) for row_idx in range(0, xl_sheet.nrows): col_data = [] for col_idx in range(0, xl_sheet.ncols): # Get cell object by row, col cell_obj = xl_sheet.cell(row_idx, col_idx) merged_info = is_merged(xl_sheet, row_idx, col_idx) # Search for value in merged_info if not cell_obj.value and merged_info: cell_obj = search_mergedcell_value( xl_sheet, merged_info[1]) col_data.append(cell_obj.value if cell_obj else '') else: col_data.append(cell_obj.value) result[sheet_name].append(col_data) count += 1 # increase sheet counter return result
python
def excel_todictlist(path_to_file, **kwargs): """ Parse excel file to a dict list of sheets, rows. """ result = collections.OrderedDict() encoding = kwargs.get('encoding', 'utf-8') formatting_info = '.xlsx' not in path_to_file count = 0 with xlrd.open_workbook( path_to_file, encoding_override=encoding, formatting_info=formatting_info) \ as _excelfile: for sheet_name_raw in _excelfile.sheet_names(): # if empty sheet name put sheet# as name sheet_name = sheet_name_raw or "sheet{}".format(count) result[sheet_name] = [] xl_sheet = _excelfile.sheet_by_name(sheet_name_raw) for row_idx in range(0, xl_sheet.nrows): col_data = [] for col_idx in range(0, xl_sheet.ncols): # Get cell object by row, col cell_obj = xl_sheet.cell(row_idx, col_idx) merged_info = is_merged(xl_sheet, row_idx, col_idx) # Search for value in merged_info if not cell_obj.value and merged_info: cell_obj = search_mergedcell_value( xl_sheet, merged_info[1]) col_data.append(cell_obj.value if cell_obj else '') else: col_data.append(cell_obj.value) result[sheet_name].append(col_data) count += 1 # increase sheet counter return result
[ "def", "excel_todictlist", "(", "path_to_file", ",", "*", "*", "kwargs", ")", ":", "result", "=", "collections", ".", "OrderedDict", "(", ")", "encoding", "=", "kwargs", ".", "get", "(", "'encoding'", ",", "'utf-8'", ")", "formatting_info", "=", "'.xlsx'", ...
Parse excel file to a dict list of sheets, rows.
[ "Parse", "excel", "file", "to", "a", "dict", "list", "of", "sheets", "rows", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L151-L193
Datary/scrapbag
scrapbag/csvs.py
search_mergedcell_value
def search_mergedcell_value(xl_sheet, merged_range): """ Search for a value in merged_range cells. """ for search_row_idx in range(merged_range[0], merged_range[1]): for search_col_idx in range(merged_range[2], merged_range[3]): if xl_sheet.cell(search_row_idx, search_col_idx).value: return xl_sheet.cell(search_row_idx, search_col_idx) return False
python
def search_mergedcell_value(xl_sheet, merged_range): """ Search for a value in merged_range cells. """ for search_row_idx in range(merged_range[0], merged_range[1]): for search_col_idx in range(merged_range[2], merged_range[3]): if xl_sheet.cell(search_row_idx, search_col_idx).value: return xl_sheet.cell(search_row_idx, search_col_idx) return False
[ "def", "search_mergedcell_value", "(", "xl_sheet", ",", "merged_range", ")", ":", "for", "search_row_idx", "in", "range", "(", "merged_range", "[", "0", "]", ",", "merged_range", "[", "1", "]", ")", ":", "for", "search_col_idx", "in", "range", "(", "merged_r...
Search for a value in merged_range cells.
[ "Search", "for", "a", "value", "in", "merged_range", "cells", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L196-L204
Datary/scrapbag
scrapbag/csvs.py
is_merged
def is_merged(sheet, row, column): """ Check if a row, column cell is a merged cell """ for cell_range in sheet.merged_cells: row_low, row_high, column_low, column_high = cell_range if (row in range(row_low, row_high)) and \ (column in range(column_low, column_high)): # TODO: IS NECESARY THIS IF? if ((column_high - column_low) < sheet.ncols - 1) and \ ((row_high - row_low) < sheet.nrows - 1): return (True, cell_range) return False
python
def is_merged(sheet, row, column): """ Check if a row, column cell is a merged cell """ for cell_range in sheet.merged_cells: row_low, row_high, column_low, column_high = cell_range if (row in range(row_low, row_high)) and \ (column in range(column_low, column_high)): # TODO: IS NECESARY THIS IF? if ((column_high - column_low) < sheet.ncols - 1) and \ ((row_high - row_low) < sheet.nrows - 1): return (True, cell_range) return False
[ "def", "is_merged", "(", "sheet", ",", "row", ",", "column", ")", ":", "for", "cell_range", "in", "sheet", ".", "merged_cells", ":", "row_low", ",", "row_high", ",", "column_low", ",", "column_high", "=", "cell_range", "if", "(", "row", "in", "range", "(...
Check if a row, column cell is a merged cell
[ "Check", "if", "a", "row", "column", "cell", "is", "a", "merged", "cell" ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L207-L221
Datary/scrapbag
scrapbag/csvs.py
populate_headers
def populate_headers(headers): """ Concatenate headers with subheaders """ result = [''] * len(headers[0]) values = [''] * len(headers) for k_index in range(0, len(headers)): for i_index in range(0, len(headers[k_index])): if headers[k_index][i_index]: values[k_index] = normalizer( str(headers[k_index][i_index])) # pass to str if len(exclude_empty_values(result)) > i_index: result[i_index] += "-{}".format(values[k_index]) else: result[i_index] += str(values[k_index]) return result
python
def populate_headers(headers): """ Concatenate headers with subheaders """ result = [''] * len(headers[0]) values = [''] * len(headers) for k_index in range(0, len(headers)): for i_index in range(0, len(headers[k_index])): if headers[k_index][i_index]: values[k_index] = normalizer( str(headers[k_index][i_index])) # pass to str if len(exclude_empty_values(result)) > i_index: result[i_index] += "-{}".format(values[k_index]) else: result[i_index] += str(values[k_index]) return result
[ "def", "populate_headers", "(", "headers", ")", ":", "result", "=", "[", "''", "]", "*", "len", "(", "headers", "[", "0", "]", ")", "values", "=", "[", "''", "]", "*", "len", "(", "headers", ")", "for", "k_index", "in", "range", "(", "0", ",", ...
Concatenate headers with subheaders
[ "Concatenate", "headers", "with", "subheaders" ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L224-L241
Datary/scrapbag
scrapbag/csvs.py
row_csv_limiter
def row_csv_limiter(rows, limits=None): """ Limit row passing a value or detect limits making the best effort. """ limits = [None, None] if limits is None else limits if len(exclude_empty_values(limits)) == 2: upper_limit = limits[0] lower_limit = limits[1] elif len(exclude_empty_values(limits)) == 1: upper_limit = limits[0] lower_limit = row_iter_limiter(rows, 1, -1, 1) else: upper_limit = row_iter_limiter(rows, 0, 1, 0) lower_limit = row_iter_limiter(rows, 1, -1, 1) return rows[upper_limit: lower_limit]
python
def row_csv_limiter(rows, limits=None): """ Limit row passing a value or detect limits making the best effort. """ limits = [None, None] if limits is None else limits if len(exclude_empty_values(limits)) == 2: upper_limit = limits[0] lower_limit = limits[1] elif len(exclude_empty_values(limits)) == 1: upper_limit = limits[0] lower_limit = row_iter_limiter(rows, 1, -1, 1) else: upper_limit = row_iter_limiter(rows, 0, 1, 0) lower_limit = row_iter_limiter(rows, 1, -1, 1) return rows[upper_limit: lower_limit]
[ "def", "row_csv_limiter", "(", "rows", ",", "limits", "=", "None", ")", ":", "limits", "=", "[", "None", ",", "None", "]", "if", "limits", "is", "None", "else", "limits", "if", "len", "(", "exclude_empty_values", "(", "limits", ")", ")", "==", "2", "...
Limit row passing a value or detect limits making the best effort.
[ "Limit", "row", "passing", "a", "value", "or", "detect", "limits", "making", "the", "best", "effort", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L244-L261
Datary/scrapbag
scrapbag/csvs.py
row_iter_limiter
def row_iter_limiter(rows, begin_row, way, c_value): """ Alghoritm to detect row limits when row have more that one column. Depending the init params find from the begin or behind. NOT SURE THAT IT WORKS WELL.. """ limit = None for index in range(begin_row, len(rows)): if not len(exclude_empty_values(rows[way * index])) == 1: limit = way * index + c_value if way * index + \ c_value not in [way * len(rows), 0] else None break return limit
python
def row_iter_limiter(rows, begin_row, way, c_value): """ Alghoritm to detect row limits when row have more that one column. Depending the init params find from the begin or behind. NOT SURE THAT IT WORKS WELL.. """ limit = None for index in range(begin_row, len(rows)): if not len(exclude_empty_values(rows[way * index])) == 1: limit = way * index + c_value if way * index + \ c_value not in [way * len(rows), 0] else None break return limit
[ "def", "row_iter_limiter", "(", "rows", ",", "begin_row", ",", "way", ",", "c_value", ")", ":", "limit", "=", "None", "for", "index", "in", "range", "(", "begin_row", ",", "len", "(", "rows", ")", ")", ":", "if", "not", "len", "(", "exclude_empty_value...
Alghoritm to detect row limits when row have more that one column. Depending the init params find from the begin or behind. NOT SURE THAT IT WORKS WELL..
[ "Alghoritm", "to", "detect", "row", "limits", "when", "row", "have", "more", "that", "one", "column", ".", "Depending", "the", "init", "params", "find", "from", "the", "begin", "or", "behind", ".", "NOT", "SURE", "THAT", "IT", "WORKS", "WELL", ".." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L264-L278
Datary/scrapbag
scrapbag/csvs.py
csv_row_cleaner
def csv_row_cleaner(rows): """ Clean row checking: - Not empty row. - >=1 element different in a row. - row allready in cleaned row result. """ result = [] for row in rows: # check not empty row check_empty = len(exclude_empty_values(row)) > 1 # check more or eq than 1 unique element in row check_set = len(set(exclude_empty_values(row))) > 1 # check row not into result cleaned rows. check_last_allready = (result and result[-1] == row) if check_empty and check_set and not check_last_allready: result.append(row) return result
python
def csv_row_cleaner(rows): """ Clean row checking: - Not empty row. - >=1 element different in a row. - row allready in cleaned row result. """ result = [] for row in rows: # check not empty row check_empty = len(exclude_empty_values(row)) > 1 # check more or eq than 1 unique element in row check_set = len(set(exclude_empty_values(row))) > 1 # check row not into result cleaned rows. check_last_allready = (result and result[-1] == row) if check_empty and check_set and not check_last_allready: result.append(row) return result
[ "def", "csv_row_cleaner", "(", "rows", ")", ":", "result", "=", "[", "]", "for", "row", "in", "rows", ":", "# check not empty row", "check_empty", "=", "len", "(", "exclude_empty_values", "(", "row", ")", ")", ">", "1", "# check more or eq than 1 unique element ...
Clean row checking: - Not empty row. - >=1 element different in a row. - row allready in cleaned row result.
[ "Clean", "row", "checking", ":", "-", "Not", "empty", "row", ".", "-", ">", "=", "1", "element", "different", "in", "a", "row", ".", "-", "row", "allready", "in", "cleaned", "row", "result", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L281-L304
Datary/scrapbag
scrapbag/csvs.py
csv_column_cleaner
def csv_column_cleaner(rows): """ clean csv columns parsed omitting empty/dirty rows. """ # check columns if there was empty columns result = [[] for x in range(0, len(rows))] for i_index in range(0, len(rows[0])): partial_values = [] for x_row in rows: partial_values.append( x_row[i_index] if len(x_row) > i_index else '') colum_rows = exclude_empty_values(partial_values) if len(colum_rows) > len(rows) / 5: # adjust this value for index in range(0, len(rows)): result[index].append( rows[index][i_index] if len(rows[index]) > i_index else '') return result
python
def csv_column_cleaner(rows): """ clean csv columns parsed omitting empty/dirty rows. """ # check columns if there was empty columns result = [[] for x in range(0, len(rows))] for i_index in range(0, len(rows[0])): partial_values = [] for x_row in rows: partial_values.append( x_row[i_index] if len(x_row) > i_index else '') colum_rows = exclude_empty_values(partial_values) if len(colum_rows) > len(rows) / 5: # adjust this value for index in range(0, len(rows)): result[index].append( rows[index][i_index] if len(rows[index]) > i_index else '') return result
[ "def", "csv_column_cleaner", "(", "rows", ")", ":", "# check columns if there was empty columns", "result", "=", "[", "[", "]", "for", "x", "in", "range", "(", "0", ",", "len", "(", "rows", ")", ")", "]", "for", "i_index", "in", "range", "(", "0", ",", ...
clean csv columns parsed omitting empty/dirty rows.
[ "clean", "csv", "columns", "parsed", "omitting", "empty", "/", "dirty", "rows", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L307-L328
Datary/scrapbag
scrapbag/csvs.py
csv_dict_format
def csv_dict_format(csv_data, c_headers=None, r_headers=None): """ Format csv rows parsed to Dict. """ # format dict if has row_headers if r_headers: result = {} for k_index in range(0, len(csv_data)): if r_headers[k_index]: result[r_headers[k_index]] = collections.OrderedDict( zip(c_headers, csv_data[k_index])) # format list if hasn't row_headers -- square csv else: result = [] for k_index in range(0, len(csv_data)): result.append( collections.OrderedDict(zip(c_headers, csv_data[k_index]))) result = [result] return result
python
def csv_dict_format(csv_data, c_headers=None, r_headers=None): """ Format csv rows parsed to Dict. """ # format dict if has row_headers if r_headers: result = {} for k_index in range(0, len(csv_data)): if r_headers[k_index]: result[r_headers[k_index]] = collections.OrderedDict( zip(c_headers, csv_data[k_index])) # format list if hasn't row_headers -- square csv else: result = [] for k_index in range(0, len(csv_data)): result.append( collections.OrderedDict(zip(c_headers, csv_data[k_index]))) result = [result] return result
[ "def", "csv_dict_format", "(", "csv_data", ",", "c_headers", "=", "None", ",", "r_headers", "=", "None", ")", ":", "# format dict if has row_headers", "if", "r_headers", ":", "result", "=", "{", "}", "for", "k_index", "in", "range", "(", "0", ",", "len", "...
Format csv rows parsed to Dict.
[ "Format", "csv", "rows", "parsed", "to", "Dict", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L338-L358
Datary/scrapbag
scrapbag/csvs.py
csv_array_clean_format
def csv_array_clean_format(csv_data, c_headers=None, r_headers=None): """ Format csv rows parsed to Array clean format. """ result = [] real_num_header = len(force_list(r_headers[0])) if r_headers else 0 result.append([""] * real_num_header + c_headers) for k_index in range(0, len(csv_data)): if r_headers: result.append( list( itertools.chain( [r_headers[k_index]], csv_data[k_index]))) else: result.append(csv_data[k_index]) return result
python
def csv_array_clean_format(csv_data, c_headers=None, r_headers=None): """ Format csv rows parsed to Array clean format. """ result = [] real_num_header = len(force_list(r_headers[0])) if r_headers else 0 result.append([""] * real_num_header + c_headers) for k_index in range(0, len(csv_data)): if r_headers: result.append( list( itertools.chain( [r_headers[k_index]], csv_data[k_index]))) else: result.append(csv_data[k_index]) return result
[ "def", "csv_array_clean_format", "(", "csv_data", ",", "c_headers", "=", "None", ",", "r_headers", "=", "None", ")", ":", "result", "=", "[", "]", "real_num_header", "=", "len", "(", "force_list", "(", "r_headers", "[", "0", "]", ")", ")", "if", "r_heade...
Format csv rows parsed to Array clean format.
[ "Format", "csv", "rows", "parsed", "to", "Array", "clean", "format", "." ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L361-L382
Datary/scrapbag
scrapbag/csvs.py
csv_format
def csv_format(csv_data, c_headers=None, r_headers=None, rows=None, **kwargs): """ Format csv rows parsed to Dict or Array """ result = None c_headers = [] if c_headers is None else c_headers r_headers = [] if r_headers is None else r_headers rows = [] if rows is None else rows result_format = kwargs.get('result_format', ARRAY_RAW_FORMAT) # DICT FORMAT if result_format == DICT_FORMAT: result = csv_dict_format(csv_data, c_headers, r_headers) # ARRAY_RAW_FORMAT elif result_format == ARRAY_RAW_FORMAT: result = rows # ARRAY_CLEAN_FORMAT elif result_format == ARRAY_CLEAN_FORMAT: result = csv_array_clean_format(csv_data, c_headers, r_headers) else: result = None # DEFAULT if result and result_format < DICT_FORMAT: result = [result] return result
python
def csv_format(csv_data, c_headers=None, r_headers=None, rows=None, **kwargs): """ Format csv rows parsed to Dict or Array """ result = None c_headers = [] if c_headers is None else c_headers r_headers = [] if r_headers is None else r_headers rows = [] if rows is None else rows result_format = kwargs.get('result_format', ARRAY_RAW_FORMAT) # DICT FORMAT if result_format == DICT_FORMAT: result = csv_dict_format(csv_data, c_headers, r_headers) # ARRAY_RAW_FORMAT elif result_format == ARRAY_RAW_FORMAT: result = rows # ARRAY_CLEAN_FORMAT elif result_format == ARRAY_CLEAN_FORMAT: result = csv_array_clean_format(csv_data, c_headers, r_headers) else: result = None # DEFAULT if result and result_format < DICT_FORMAT: result = [result] return result
[ "def", "csv_format", "(", "csv_data", ",", "c_headers", "=", "None", ",", "r_headers", "=", "None", ",", "rows", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "None", "c_headers", "=", "[", "]", "if", "c_headers", "is", "None", "else...
Format csv rows parsed to Dict or Array
[ "Format", "csv", "rows", "parsed", "to", "Dict", "or", "Array" ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L385-L415
Datary/scrapbag
scrapbag/csvs.py
csv_to_dict
def csv_to_dict(csv_filepath, **kwargs): """ Turn csv into dict. Args: :csv_filepath: path to csv file to turn into dict. :limits: path to csv file to turn into dict """ callbacks = {'to_list': csv_tolist, 'row_csv_limiter': row_csv_limiter, 'csv_row_cleaner': csv_row_cleaner, 'row_headers_count': row_headers_count, 'get_col_header': get_csv_col_headers, 'get_row_headers': get_row_headers, 'populate_headers': populate_headers, 'csv_column_header_cleaner': csv_column_header_cleaner, 'csv_column_cleaner': csv_column_cleaner, 'retrieve_csv_data': retrieve_csv_data} callbacks.update(kwargs.get('alt_callbacks', {})) rows = kwargs.get('rows', []) if not rows: # csv_tolist of rows rows = callbacks.get('to_list')(csv_filepath, **kwargs) if not rows: msg = 'Empty rows obtained from {}'.format(csv_filepath) logger.warning(msg) raise ValueError(msg) # apply limits rows = callbacks.get('row_csv_limiter')( rows, kwargs.get('limits', [None, None])) # apply row cleaner rows = callbacks.get('csv_row_cleaner')(rows) # apply column cleaner rows = callbacks.get('csv_column_cleaner')(rows) # count raw headers num_row_headers = callbacks.get('row_headers_count')(rows) # take colum_headers c_headers_raw = callbacks.get('get_col_header')(rows, num_row_headers) # get row_headers r_headers = callbacks.get('get_row_headers')( rows, num_row_headers, len(c_headers_raw)) # format colum_headers c_headers_dirty = callbacks.get('populate_headers')( c_headers_raw) if len(c_headers_raw) > 1 else c_headers_raw[0] # Clean csv column headers of empty values. 
c_headers = callbacks.get('csv_column_header_cleaner')(c_headers_dirty) # take data csv_data = callbacks.get('retrieve_csv_data')( rows, column_header=len(c_headers_raw), row_header=num_row_headers, limit_column=len(c_headers) - len(c_headers_dirty) or None) # Check column headers validation if csv_data: assert len(c_headers) == len(csv_data[0]) # Check row headers validation if r_headers: assert len(r_headers) == len(csv_data) # Transform rows into dict zipping the headers. kwargs.pop('rows', None) result = csv_format(csv_data, c_headers, r_headers, rows, **kwargs) return result
python
def csv_to_dict(csv_filepath, **kwargs): """ Turn csv into dict. Args: :csv_filepath: path to csv file to turn into dict. :limits: path to csv file to turn into dict """ callbacks = {'to_list': csv_tolist, 'row_csv_limiter': row_csv_limiter, 'csv_row_cleaner': csv_row_cleaner, 'row_headers_count': row_headers_count, 'get_col_header': get_csv_col_headers, 'get_row_headers': get_row_headers, 'populate_headers': populate_headers, 'csv_column_header_cleaner': csv_column_header_cleaner, 'csv_column_cleaner': csv_column_cleaner, 'retrieve_csv_data': retrieve_csv_data} callbacks.update(kwargs.get('alt_callbacks', {})) rows = kwargs.get('rows', []) if not rows: # csv_tolist of rows rows = callbacks.get('to_list')(csv_filepath, **kwargs) if not rows: msg = 'Empty rows obtained from {}'.format(csv_filepath) logger.warning(msg) raise ValueError(msg) # apply limits rows = callbacks.get('row_csv_limiter')( rows, kwargs.get('limits', [None, None])) # apply row cleaner rows = callbacks.get('csv_row_cleaner')(rows) # apply column cleaner rows = callbacks.get('csv_column_cleaner')(rows) # count raw headers num_row_headers = callbacks.get('row_headers_count')(rows) # take colum_headers c_headers_raw = callbacks.get('get_col_header')(rows, num_row_headers) # get row_headers r_headers = callbacks.get('get_row_headers')( rows, num_row_headers, len(c_headers_raw)) # format colum_headers c_headers_dirty = callbacks.get('populate_headers')( c_headers_raw) if len(c_headers_raw) > 1 else c_headers_raw[0] # Clean csv column headers of empty values. 
c_headers = callbacks.get('csv_column_header_cleaner')(c_headers_dirty) # take data csv_data = callbacks.get('retrieve_csv_data')( rows, column_header=len(c_headers_raw), row_header=num_row_headers, limit_column=len(c_headers) - len(c_headers_dirty) or None) # Check column headers validation if csv_data: assert len(c_headers) == len(csv_data[0]) # Check row headers validation if r_headers: assert len(r_headers) == len(csv_data) # Transform rows into dict zipping the headers. kwargs.pop('rows', None) result = csv_format(csv_data, c_headers, r_headers, rows, **kwargs) return result
[ "def", "csv_to_dict", "(", "csv_filepath", ",", "*", "*", "kwargs", ")", ":", "callbacks", "=", "{", "'to_list'", ":", "csv_tolist", ",", "'row_csv_limiter'", ":", "row_csv_limiter", ",", "'csv_row_cleaner'", ":", "csv_row_cleaner", ",", "'row_headers_count'", ":"...
Turn csv into dict. Args: :csv_filepath: path to csv file to turn into dict. :limits: path to csv file to turn into dict
[ "Turn", "csv", "into", "dict", ".", "Args", ":", ":", "csv_filepath", ":", "path", "to", "csv", "file", "to", "turn", "into", "dict", ".", ":", "limits", ":", "path", "to", "csv", "file", "to", "turn", "into", "dict" ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L418-L494
Datary/scrapbag
scrapbag/csvs.py
excel_to_dict
def excel_to_dict(excel_filepath, encapsulate_filepath=False, **kwargs): """ Turn excel into dict. Args: :excel_filepath: path to excel file to turn into dict. :limits: path to csv file to turn into dict """ result = {} try: callbacks = {'to_dictlist': excel_todictlist} # Default callback callbacks.update(kwargs.get('alt_callbacks', {})) # Retrieve excel data as dict of sheets lists excel_data = callbacks.get('to_dictlist')(excel_filepath, **kwargs) for sheet in excel_data.keys(): try: kwargs['rows'] = excel_data.get(sheet, []) result[sheet] = csv_to_dict(excel_filepath, **kwargs) except Exception as ex: logger.error('Fail to parse sheet {} - {}'.format(sheet, ex)) result[sheet] = [] continue if encapsulate_filepath: result = {excel_filepath: result} except Exception as ex: msg = 'Fail transform excel to dict - {}'.format(ex) logger.error(msg, excel_filepath=excel_filepath) return result
python
def excel_to_dict(excel_filepath, encapsulate_filepath=False, **kwargs): """ Turn excel into dict. Args: :excel_filepath: path to excel file to turn into dict. :limits: path to csv file to turn into dict """ result = {} try: callbacks = {'to_dictlist': excel_todictlist} # Default callback callbacks.update(kwargs.get('alt_callbacks', {})) # Retrieve excel data as dict of sheets lists excel_data = callbacks.get('to_dictlist')(excel_filepath, **kwargs) for sheet in excel_data.keys(): try: kwargs['rows'] = excel_data.get(sheet, []) result[sheet] = csv_to_dict(excel_filepath, **kwargs) except Exception as ex: logger.error('Fail to parse sheet {} - {}'.format(sheet, ex)) result[sheet] = [] continue if encapsulate_filepath: result = {excel_filepath: result} except Exception as ex: msg = 'Fail transform excel to dict - {}'.format(ex) logger.error(msg, excel_filepath=excel_filepath) return result
[ "def", "excel_to_dict", "(", "excel_filepath", ",", "encapsulate_filepath", "=", "False", ",", "*", "*", "kwargs", ")", ":", "result", "=", "{", "}", "try", ":", "callbacks", "=", "{", "'to_dictlist'", ":", "excel_todictlist", "}", "# Default callback", "callb...
Turn excel into dict. Args: :excel_filepath: path to excel file to turn into dict. :limits: path to csv file to turn into dict
[ "Turn", "excel", "into", "dict", ".", "Args", ":", ":", "excel_filepath", ":", "path", "to", "excel", "file", "to", "turn", "into", "dict", ".", ":", "limits", ":", "path", "to", "csv", "file", "to", "turn", "into", "dict" ]
train
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L497-L527
clinicedc/edc-notification
edc_notification/notification/notification.py
Notification.notify
def notify( self, force_notify=None, use_email=None, use_sms=None, email_body_template=None, **kwargs, ): """Notify / send an email and/or SMS. Main entry point. This notification class (me) knows from whom and to whom the notifications will be sent. See signals and kwargs are: * history_instance * instance * user """ email_sent = None sms_sent = None use_email = use_email or getattr(settings, "EMAIL_ENABLED", False) use_sms = use_sms or getattr(settings, "TWILIO_ENABLED", False) if force_notify or self._notify_on_condition(**kwargs): if use_email: email_body_template = ( email_body_template or self.email_body_template ) + self.email_footer_template email_sent = self.send_email( email_body_template=email_body_template, **kwargs ) if use_sms: sms_sent = self.send_sms(**kwargs) self.post_notification_actions( email_sent=email_sent, sms_sent=sms_sent, **kwargs ) return True if email_sent or sms_sent else False
python
def notify( self, force_notify=None, use_email=None, use_sms=None, email_body_template=None, **kwargs, ): """Notify / send an email and/or SMS. Main entry point. This notification class (me) knows from whom and to whom the notifications will be sent. See signals and kwargs are: * history_instance * instance * user """ email_sent = None sms_sent = None use_email = use_email or getattr(settings, "EMAIL_ENABLED", False) use_sms = use_sms or getattr(settings, "TWILIO_ENABLED", False) if force_notify or self._notify_on_condition(**kwargs): if use_email: email_body_template = ( email_body_template or self.email_body_template ) + self.email_footer_template email_sent = self.send_email( email_body_template=email_body_template, **kwargs ) if use_sms: sms_sent = self.send_sms(**kwargs) self.post_notification_actions( email_sent=email_sent, sms_sent=sms_sent, **kwargs ) return True if email_sent or sms_sent else False
[ "def", "notify", "(", "self", ",", "force_notify", "=", "None", ",", "use_email", "=", "None", ",", "use_sms", "=", "None", ",", "email_body_template", "=", "None", ",", "*", "*", "kwargs", ",", ")", ":", "email_sent", "=", "None", "sms_sent", "=", "No...
Notify / send an email and/or SMS. Main entry point. This notification class (me) knows from whom and to whom the notifications will be sent. See signals and kwargs are: * history_instance * instance * user
[ "Notify", "/", "send", "an", "email", "and", "/", "or", "SMS", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/notification/notification.py#L91-L128
clinicedc/edc-notification
edc_notification/notification/notification.py
Notification._notify_on_condition
def _notify_on_condition(self, test_message=None, **kwargs): """Returns the value of `notify_on_condition` or False. """ if test_message: return True else: return self.enabled and self.notify_on_condition(**kwargs)
python
def _notify_on_condition(self, test_message=None, **kwargs): """Returns the value of `notify_on_condition` or False. """ if test_message: return True else: return self.enabled and self.notify_on_condition(**kwargs)
[ "def", "_notify_on_condition", "(", "self", ",", "test_message", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "test_message", ":", "return", "True", "else", ":", "return", "self", ".", "enabled", "and", "self", ".", "notify_on_condition", "(", "*"...
Returns the value of `notify_on_condition` or False.
[ "Returns", "the", "value", "of", "notify_on_condition", "or", "False", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/notification/notification.py#L138-L144
clinicedc/edc-notification
edc_notification/notification/notification.py
Notification.enabled
def enabled(self): """Returns True if this notification is enabled based on the value of Notification model instance. Note: Notification names/display_names are persisted in the "Notification" model where each mode instance can be flagged as enabled or not, and are selected/subscribed to by each user in their user profile. See also: `site_notifications.update_notification_list` """ if not self._notification_enabled: self._notification_enabled = self.notification_model.enabled return self._notification_enabled
python
def enabled(self): """Returns True if this notification is enabled based on the value of Notification model instance. Note: Notification names/display_names are persisted in the "Notification" model where each mode instance can be flagged as enabled or not, and are selected/subscribed to by each user in their user profile. See also: `site_notifications.update_notification_list` """ if not self._notification_enabled: self._notification_enabled = self.notification_model.enabled return self._notification_enabled
[ "def", "enabled", "(", "self", ")", ":", "if", "not", "self", ".", "_notification_enabled", ":", "self", ".", "_notification_enabled", "=", "self", ".", "notification_model", ".", "enabled", "return", "self", ".", "_notification_enabled" ]
Returns True if this notification is enabled based on the value of Notification model instance. Note: Notification names/display_names are persisted in the "Notification" model where each mode instance can be flagged as enabled or not, and are selected/subscribed to by each user in their user profile. See also: `site_notifications.update_notification_list`
[ "Returns", "True", "if", "this", "notification", "is", "enabled", "based", "on", "the", "value", "of", "Notification", "model", "instance", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/notification/notification.py#L150-L163
clinicedc/edc-notification
edc_notification/notification/notification.py
Notification.notification_model
def notification_model(self): """Returns the Notification 'model' instance associated with this notification. """ NotificationModel = django_apps.get_model("edc_notification.notification") # trigger exception if this class is not registered. site_notifications.get(self.name) try: notification_model = NotificationModel.objects.get(name=self.name) except ObjectDoesNotExist: site_notifications.update_notification_list() notification_model = NotificationModel.objects.get(name=self.name) return notification_model
python
def notification_model(self): """Returns the Notification 'model' instance associated with this notification. """ NotificationModel = django_apps.get_model("edc_notification.notification") # trigger exception if this class is not registered. site_notifications.get(self.name) try: notification_model = NotificationModel.objects.get(name=self.name) except ObjectDoesNotExist: site_notifications.update_notification_list() notification_model = NotificationModel.objects.get(name=self.name) return notification_model
[ "def", "notification_model", "(", "self", ")", ":", "NotificationModel", "=", "django_apps", ".", "get_model", "(", "\"edc_notification.notification\"", ")", "# trigger exception if this class is not registered.", "site_notifications", ".", "get", "(", "self", ".", "name", ...
Returns the Notification 'model' instance associated with this notification.
[ "Returns", "the", "Notification", "model", "instance", "associated", "with", "this", "notification", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/notification/notification.py#L166-L178
clinicedc/edc-notification
edc_notification/notification/notification.py
Notification.get_template_options
def get_template_options(self, instance=None, test_message=None, **kwargs):
    """Return a dict of message template options.

    Extend via ``extra_template_options``.  When *instance* carries a
    ``subject_identifier`` or a ``site``, those values are folded in too.
    """
    protocol_name = django_apps.get_app_config("edc_protocol").protocol_name
    is_test = test_message or self.test_message
    options = {
        "name": self.name,
        "protocol_name": protocol_name,
        "display_name": self.display_name,
        "email_from": self.email_from,
        "test_subject_line": (
            self.email_test_subject_line if is_test else ""
        ).strip(),
        "test_body_line": self.email_test_body_line if is_test else "",
        "test_line": self.sms_test_line if is_test else "",
        "message_datetime": get_utcnow(),
        "message_reference": "",
    }
    if "subject_identifier" not in options:
        try:
            options["subject_identifier"] = instance.subject_identifier
        except AttributeError:
            # instance is None or has no subject_identifier -- leave unset.
            pass
    if "site_name" not in options:
        try:
            options["site_name"] = instance.site.name.title()
        except AttributeError:
            pass
    return options
[ "def", "get_template_options", "(", "self", ",", "instance", "=", "None", ",", "test_message", "=", "None", ",", "*", "*", "kwargs", ")", ":", "protocol_name", "=", "django_apps", ".", "get_app_config", "(", "\"edc_protocol\"", ")", ".", "protocol_name", "test...
Returns a dictionary of message template options. Extend using `extra_template_options`.
[ "Returns", "a", "dictionary", "of", "message", "template", "options", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/notification/notification.py#L180-L210
clinicedc/edc-notification
edc_notification/notification/notification.py
Notification.sms_recipients
def sms_recipients(self):
    """Return mobile numbers of active staff users subscribed to this
    notification's SMS messages.  See also: edc_auth.UserProfile.
    """
    UserProfile = django_apps.get_model("edc_auth.UserProfile")
    recipients = []
    profiles = UserProfile.objects.filter(
        user__is_active=True, user__is_staff=True
    )
    for profile in profiles:
        try:
            profile.sms_notifications.get(name=self.name)
        except ObjectDoesNotExist:
            continue  # not subscribed to this notification
        if profile.mobile:
            recipients.append(profile.mobile)
    return recipients
[ "def", "sms_recipients", "(", "self", ")", ":", "sms_recipients", "=", "[", "]", "UserProfile", "=", "django_apps", ".", "get_model", "(", "\"edc_auth.UserProfile\"", ")", "for", "user_profile", "in", "UserProfile", ".", "objects", ".", "filter", "(", "user__is_...
Returns a list of recipients subscribed to receive SMS's for this "notifications" class. See also: edc_auth.UserProfile.
[ "Returns", "a", "list", "of", "recipients", "subscribed", "to", "receive", "SMS", "s", "for", "this", "notifications", "class", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/notification/notification.py#L256-L274
chewse/djangorestframework-signed-permissions
signedpermissions/signing.py
sign_filter_permissions
def sign_filter_permissions(permissions):
    """
    Return a compressed, signed dump of the json blob.

    This function expects a json blob that is a dictionary containing
    model dotted names as keys. Those keys each have a value that is a
    list of dictionaries, each of which contains the keys 'filters' and
    'actions':

    The 'filters' key is a dict that is a filter to be applied to a
    django queryset. The 'actions' key is a list of DRF methods that can
    be called for this model's viewset. For example:

    {
        'accounts.Account': [
            {
                'filters': {
                    'email': 'marcel@chewse.com',
                    'organizations__name': 'Chewse'
                },
                'actions': ['create', 'partial_update']
            }
        ]
    }
    """
    # ``dict.iteritems()`` exists only on Python 2; ``items()`` is
    # equivalent here and also works on Python 3.
    permissions = {key.lower(): value for key, value in permissions.items()}
    return signing.dumps(permissions, compress=True)
[ "def", "sign_filter_permissions", "(", "permissions", ")", ":", "permissions", "=", "{", "key", ".", "lower", "(", ")", ":", "value", "for", "key", ",", "value", "in", "permissions", ".", "iteritems", "(", ")", "}", "return", "signing", ".", "dumps", "("...
Return a compressed, signed dump of the json blob. This function expects a json blob that is a dictionary containing model dotted names as keys. Those keys each have a value that is a list of dictionaries, each of which contains the keys 'filters' and 'actions': The key 'filters' key is a dict that is a filter to be applied to a django queryset. The key 'actions' is a list of DRF methods that can be called for this model's viewset. For example: { 'accounts.Account': [ { 'filters': { 'email': 'marcel@chewse.com', 'organizations__name': 'Chewse' }, 'actions': ['create', 'partial_update'] } ] }
[ "Return", "a", "compressed", "signed", "dump", "of", "the", "json", "blob", "." ]
train
https://github.com/chewse/djangorestframework-signed-permissions/blob/b1cc4c57999fc5be8361f60f0ada1d777b27feab/signedpermissions/signing.py#L6-L32
chewse/djangorestframework-signed-permissions
signedpermissions/signing.py
unsign_filters_and_actions
def unsign_filters_and_actions(sign, dotted_model_name):
    """Return the list of filters/actions for *dotted_model_name* ([] if absent)."""
    return signing.loads(sign).get(dotted_model_name, [])
[ "def", "unsign_filters_and_actions", "(", "sign", ",", "dotted_model_name", ")", ":", "permissions", "=", "signing", ".", "loads", "(", "sign", ")", "return", "permissions", ".", "get", "(", "dotted_model_name", ",", "[", "]", ")" ]
Return the list of filters and actions for dotted_model_name.
[ "Return", "the", "list", "of", "filters", "and", "actions", "for", "dotted_model_name", "." ]
train
https://github.com/chewse/djangorestframework-signed-permissions/blob/b1cc4c57999fc5be8361f60f0ada1d777b27feab/signedpermissions/signing.py#L35-L38
jacebrowning/comparable
comparable/base.py
equal
def equal(obj1, obj2):
    """Log and compute equality between two (Comparable) objects."""
    Comparable.log(obj1, obj2, '==')
    outcome = obj1.equality(obj2)
    Comparable.log(obj1, obj2, '==', result=outcome)
    return outcome
[ "def", "equal", "(", "obj1", ",", "obj2", ")", ":", "Comparable", ".", "log", "(", "obj1", ",", "obj2", ",", "'=='", ")", "equality", "=", "obj1", ".", "equality", "(", "obj2", ")", "Comparable", ".", "log", "(", "obj1", ",", "obj2", ",", "'=='", ...
Calculate equality between two (Comparable) objects.
[ "Calculate", "equality", "between", "two", "(", "Comparable", ")", "objects", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L127-L132
jacebrowning/comparable
comparable/base.py
similar
def similar(obj1, obj2):
    """Log and compute similarity between two (Comparable) objects."""
    Comparable.log(obj1, obj2, '%')
    outcome = obj1.similarity(obj2)
    Comparable.log(obj1, obj2, '%', result=outcome)
    return outcome
[ "def", "similar", "(", "obj1", ",", "obj2", ")", ":", "Comparable", ".", "log", "(", "obj1", ",", "obj2", ",", "'%'", ")", "similarity", "=", "obj1", ".", "similarity", "(", "obj2", ")", "Comparable", ".", "log", "(", "obj1", ",", "obj2", ",", "'%'...
Calculate similarity between two (Comparable) objects.
[ "Calculate", "similarity", "between", "two", "(", "Comparable", ")", "objects", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L135-L140
jacebrowning/comparable
comparable/base.py
_Base._repr
def _repr(self, *args, **kwargs): """Return a __repr__ string from the arguments provided to __init__. @param args: list of arguments to __init__ @param kwargs: dictionary of keyword arguments to __init__ @return: __repr__ string """ # Remove unnecessary empty keywords arguments and sort the arguments kwargs = {k: v for k, v in kwargs.items() if v is not None} kwargs = OrderedDict(sorted(kwargs.items())) # Build the __repr__ string pieces args_repr = ', '.join(repr(arg) for arg in args) kwargs_repr = ', '.join(k + '=' + repr(v) for k, v in kwargs.items()) if args_repr and kwargs_repr: kwargs_repr = ', ' + kwargs_repr name = self.__class__.__name__ return "{}({}{})".format(name, args_repr, kwargs_repr)
python
def _repr(self, *args, **kwargs): """Return a __repr__ string from the arguments provided to __init__. @param args: list of arguments to __init__ @param kwargs: dictionary of keyword arguments to __init__ @return: __repr__ string """ # Remove unnecessary empty keywords arguments and sort the arguments kwargs = {k: v for k, v in kwargs.items() if v is not None} kwargs = OrderedDict(sorted(kwargs.items())) # Build the __repr__ string pieces args_repr = ', '.join(repr(arg) for arg in args) kwargs_repr = ', '.join(k + '=' + repr(v) for k, v in kwargs.items()) if args_repr and kwargs_repr: kwargs_repr = ', ' + kwargs_repr name = self.__class__.__name__ return "{}({}{})".format(name, args_repr, kwargs_repr)
[ "def", "_repr", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Remove unnecessary empty keywords arguments and sort the arguments", "kwargs", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if",...
Return a __repr__ string from the arguments provided to __init__. @param args: list of arguments to __init__ @param kwargs: dictionary of keyword arguments to __init__ @return: __repr__ string
[ "Return", "a", "__repr__", "string", "from", "the", "arguments", "provided", "to", "__init__", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L12-L31
jacebrowning/comparable
comparable/base.py
Comparable.equality
def equality(self, other):
    """Compare the configured attributes of two objects for equality.

    Returns False as soon as any attribute differs or is missing on
    either object; True only when every attribute matches.

    @param self: first object to compare
    @param other: second object to compare
    @return: boolean result of comparison
    """
    cname = self.__class__.__name__
    for aname in self.attributes:
        try:
            mine = getattr(self, aname)
            theirs = getattr(other, aname)
        except AttributeError as error:
            logging.debug("%s.%s: %s", cname, aname, error)
            return False
        self.log(mine, theirs, '==', cname=cname, aname=aname)
        matched = mine == theirs
        self.log(mine, theirs, '==', cname=cname, aname=aname, result=matched)
        if not matched:
            return False
    return True
[ "def", "equality", "(", "self", ",", "other", ")", ":", "# Compare specified attributes for equality", "cname", "=", "self", ".", "__class__", ".", "__name__", "for", "aname", "in", "self", ".", "attributes", ":", "try", ":", "attr1", "=", "getattr", "(", "s...
Compare two objects for equality. @param self: first object to compare @param other: second object to compare @return: boolean result of comparison
[ "Compare", "two", "objects", "for", "equality", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L179-L203
jacebrowning/comparable
comparable/base.py
Comparable.similarity
def similarity(self, other):
    """Compute a weighted Similarity over the configured attributes.

    Attribute pairs that are None on both sides are skipped entirely;
    pairs where either side is not Comparable contribute zero similarity
    but full weight.  The accumulated score is scaled so the weights
    effectively sum to 1.0.

    @param self: first object to compare
    @param other: second object to compare
    @return: L{Similarity} result of comparison
    """
    cname = self.__class__.__name__
    sim = self.Similarity()
    total_weight = 0.0
    for aname, weight in self.attributes.items():
        mine = getattr(self, aname, None)
        theirs = getattr(other, aname, None)
        self.log(mine, theirs, '%', cname=cname, aname=aname)
        if mine is None and theirs is None:
            # Ignored: contributes neither score nor weight.
            self.log(mine, theirs, '%', cname=cname, aname=aname,
                     result="attributes are both None")
            continue
        if not (isinstance(mine, Comparable) and isinstance(theirs, Comparable)):
            # Counts as zero similarity but still carries its weight.
            self.log(mine, theirs, '%', cname=cname, aname=aname,
                     result="attributes not Comparable")
            total_weight += weight
            continue
        attr_sim = mine % theirs
        self.log(mine, theirs, '%', cname=cname, aname=aname, result=attr_sim)
        sim += attr_sim * weight
        total_weight += weight
    # Normalise so the final similarity is a weighted average.
    if total_weight:
        sim *= (1.0 / total_weight)
    return sim
[ "def", "similarity", "(", "self", ",", "other", ")", ":", "sim", "=", "self", ".", "Similarity", "(", ")", "total", "=", "0.0", "# Calculate similarity ratio for each attribute", "cname", "=", "self", ".", "__class__", ".", "__name__", "for", "aname", ",", "...
Compare two objects for similarity. @param self: first object to compare @param other: second object to compare @return: L{Similarity} result of comparison
[ "Compare", "two", "objects", "for", "similarity", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L206-L253
jacebrowning/comparable
comparable/base.py
Comparable.Similarity
def Similarity(self, value=None):  # pylint: disable=C0103
    """Build a Similarity (default value 0.0) using this object's threshold."""
    return Similarity(0.0 if value is None else value, threshold=self.threshold)
[ "def", "Similarity", "(", "self", ",", "value", "=", "None", ")", ":", "# pylint: disable=C0103", "if", "value", "is", "None", ":", "value", "=", "0.0", "return", "Similarity", "(", "value", ",", "threshold", "=", "self", ".", "threshold", ")" ]
Constructor for new default Similarities.
[ "Constructor", "for", "new", "default", "Similarities", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L255-L259
jacebrowning/comparable
comparable/base.py
Comparable.log
def log(obj1, obj2, sym, cname=None, aname=None, result=None):  # pylint: disable=R0913
    """Log a comparison and its result, with nested indentation.

    A call without *result* marks a comparison in progress and increases
    the indentation level; the matching call that supplies a result
    decreases it back.

    @param obj1: first object
    @param obj2: second object
    @param sym: operation being performed ('==' or '%')
    @param cname: name of class (when attributes are being compared)
    @param aname: name of attribute (when attributes are being compared)
    @param result: outcome of comparison
    """
    fmt = "{o1} {sym} {o2} : {r}"
    if cname or aname:
        assert cname and aname  # both must be specified
        fmt = "{c}.{a}: " + fmt
    if result is None:
        result = '...'
        fmt = _Indent.indent(fmt)
        _Indent.more()
    else:
        _Indent.less()
        fmt = _Indent.indent(fmt)
    logging.info(fmt.format(o1=repr(obj1), o2=repr(obj2),
                            c=cname, a=aname, sym=sym, r=result))
[ "def", "log", "(", "obj1", ",", "obj2", ",", "sym", ",", "cname", "=", "None", ",", "aname", "=", "None", ",", "result", "=", "None", ")", ":", "# pylint: disable=R0913", "fmt", "=", "\"{o1} {sym} {o2} : {r}\"", "if", "cname", "or", "aname", ":", "assert...
Log the objects being compared and the result. When no result object is specified, subsequence calls will have an increased indentation level. The indentation level is decreased once a result object is provided. @param obj1: first object @param obj2: second object @param sym: operation being performed ('==' or '%') @param cname: name of class (when attributes are being compared) @param aname: name of attribute (when attributes are being compared) @param result: outcome of comparison
[ "Log", "the", "objects", "being", "compared", "and", "the", "result", "." ]
train
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L262-L292
benley/butcher
butcher/targets/pkgfilegroup.py
PkgFileGroup.translate_path
def translate_path(self, dep_file, dep_rule):
    """Map *dep_file* from *dep_rule*'s output tree into this rule's tree.

    By default the dependency's repo/path prefix is stripped; a truthy
    ``strip_prefix`` param overrides that stripping.  The ``prefix`` param
    is then prepended under this rule's own repo/path.
    """
    dep_root = os.path.join(dep_rule.address.repo, dep_rule.address.path)
    rel_path = dep_file.split(dep_root, 1)[-1]
    if self.params['strip_prefix']:
        rel_path = dep_file.split(self.params['strip_prefix'], 1)[-1]
    return os.path.join(self.address.repo,
                        self.address.path,
                        self.params['prefix'].lstrip('/'),
                        rel_path.lstrip('/'))
[ "def", "translate_path", "(", "self", ",", "dep_file", ",", "dep_rule", ")", ":", "dst_base", "=", "dep_file", ".", "split", "(", "os", ".", "path", ".", "join", "(", "dep_rule", ".", "address", ".", "repo", ",", "dep_rule", ".", "address", ".", "path"...
Translate dep_file from dep_rule into this rule's output path.
[ "Translate", "dep_file", "from", "dep_rule", "into", "this", "rule", "s", "output", "path", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/pkgfilegroup.py#L51-L59
benley/butcher
butcher/targets/pkgfilegroup.py
PkgFileGroup.output_files
def output_files(self):
    """Yield this rule's output file paths, relative to buildroot.

    Each dependency's outputs are run through translate_path so that
    prefix/strip_prefix are applied before yielding.
    """
    for dep_addr in self.subgraph.successors(self.address):
        dep_rule = self.subgraph.node[dep_addr]['target_obj']
        for src in dep_rule.output_files:
            yield self.translate_path(src, dep_rule).lstrip('/')
[ "def", "output_files", "(", "self", ")", ":", "for", "dep", "in", "self", ".", "subgraph", ".", "successors", "(", "self", ".", "address", ")", ":", "dep_rule", "=", "self", ".", "subgraph", ".", "node", "[", "dep", "]", "[", "'target_obj'", "]", "fo...
Returns the list of output files from this rule. Paths are generated from the outputs of this rule's dependencies, with their paths translated based on prefix and strip_prefix. Returned paths are relative to buildroot.
[ "Returns", "the", "list", "of", "output", "files", "from", "this", "rule", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/pkgfilegroup.py#L62-L73
benley/butcher
butcher/targets/__init__.py
new
def new(ruletype, **kwargs):
    """Instantiate a new build rule of the given type from kwargs.

    Raises error.InvalidRule for an unknown *ruletype*; logs and
    re-raises if the rule class rejects the supplied arguments.
    """
    if ruletype not in TYPE_MAP:
        raise error.InvalidRule('Unrecognized rule type: %s' % ruletype)
    ruleclass = TYPE_MAP[ruletype]
    try:
        return ruleclass(**kwargs)
    except TypeError:
        log.error('BADNESS. ruletype: %s, data: %s', ruletype, kwargs)
        raise
[ "def", "new", "(", "ruletype", ",", "*", "*", "kwargs", ")", ":", "try", ":", "ruleclass", "=", "TYPE_MAP", "[", "ruletype", "]", "except", "KeyError", ":", "raise", "error", ".", "InvalidRule", "(", "'Unrecognized rule type: %s'", "%", "ruletype", ")", "t...
Instantiate a new build rule based on kwargs. Appropriate args list varies with rule type. Minimum args required: [... fill this in ...]
[ "Instantiate", "a", "new", "build", "rule", "based", "on", "kwargs", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/__init__.py#L28-L44
thejunglejane/datums
datums/pipeline/codec.py
human_to_boolean
def human_to_boolean(human):
    """Translate a ['Yes'/'No'] list into True/False.

    PARAMETERS
    ----------
    human : list
        A list whose first element is the "human" boolean string to
        convert.  Non-list input or an empty list yields None.  Only the
        first element is inspected; anything other than a
        case-insensitive 'yes' maps to False.
    """
    if not isinstance(human, list) or not human:
        return None
    return human[0].lower() == 'yes'
[ "def", "human_to_boolean", "(", "human", ")", ":", "if", "not", "isinstance", "(", "human", ",", "list", ")", "or", "len", "(", "human", ")", "==", "0", ":", "return", "None", "if", "human", "[", "0", "]", ".", "lower", "(", ")", "==", "'yes'", "...
Convert a boolean string ('Yes' or 'No') to True or False. PARAMETERS ---------- human : list a list containing the "human" boolean string to be converted to a Python boolean object. If a non-list is passed, or if the list is empty, None will be returned. Only the first element of the list will be used. Anything other than 'Yes' will be considered False.
[ "Convert", "a", "boolean", "string", "(", "Yes", "or", "No", ")", "to", "True", "or", "False", "." ]
train
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/pipeline/codec.py#L6-L22
littlemo/moear-package-mobi
moear_package_mobi/entry.py
Mobi.generate
def generate(self, data, *args, **kwargs):
    """Build the final push package from *data* and return its bytes.

    MoEar persists the returned bytes and uses them for later push tasks.
    The crawler runs inside a temporary build directory (unless
    ``package_build_dir`` is already set in the options); the generated
    ``source/moear.mobi`` file is then read back.

    :param dict data: data structure to be packaged
    :return: generated book package bytes
    :rtype: bytes
    """
    with tempfile.TemporaryDirectory() as workdir:
        self.options.setdefault('package_build_dir', workdir)
        CrawlerScript(self.options).crawl(data, self.spider, *args, **kwargs)
        mobi_path = os.path.join(
            self.options['package_build_dir'], 'source', 'moear.mobi')
        with open(mobi_path, 'rb') as fobj:
            return fobj.read()
[ "def", "generate", "(", "self", ",", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "tempfile", ".", "TemporaryDirectory", "(", ")", "as", "tmpdirname", ":", "self", ".", "options", ".", "setdefault", "(", "'package_build_dir'", ","...
根据传入的数据结构生成最终用于推送的文件字节字符串( :func:`bytes` ), MoEar会将其持久化并用于之后的推送任务 :param dict data: 待打包的数据结构 :return: 返回生成的书籍打包输出字节 :rtype: bytes
[ "根据传入的数据结构生成最终用于推送的文件字节字符串", "(", ":", "func", ":", "bytes", ")", ",", "MoEar会将其持久化并用于之后的推送任务" ]
train
https://github.com/littlemo/moear-package-mobi/blob/189a077bd0ad5309607957b3f1c0b65eae40ec90/moear_package_mobi/entry.py#L45-L64
mgaitan/mts
mts/orm_magic/hosts.py
_countdown
def _countdown(seconds): """ Wait `seconds` counting down. """ for i in range(seconds, 0, -1): sys.stdout.write("%02d" % i) time.sleep(1) sys.stdout.write("\b\b") sys.stdout.flush() sys.stdout.flush()
python
def _countdown(seconds): """ Wait `seconds` counting down. """ for i in range(seconds, 0, -1): sys.stdout.write("%02d" % i) time.sleep(1) sys.stdout.write("\b\b") sys.stdout.flush() sys.stdout.flush()
[ "def", "_countdown", "(", "seconds", ")", ":", "for", "i", "in", "range", "(", "seconds", ",", "0", ",", "-", "1", ")", ":", "sys", ".", "stdout", ".", "write", "(", "\"%02d\"", "%", "i", ")", "time", ".", "sleep", "(", "1", ")", "sys", ".", ...
Wait `seconds` counting down.
[ "Wait", "seconds", "counting", "down", "." ]
train
https://github.com/mgaitan/mts/blob/bb018e987d4d6c10babb4627f117c894d0dd4c35/mts/orm_magic/hosts.py#L7-L16
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/bazaar.py
Bazaar.export
def export(self, location):
    """Export the Bazaar repository at the url to the destination *location*.

    The branch is first unpacked into a temporary directory; any existing
    *location* is removed so ``bzr export`` can create it cleanly.  The
    temporary directory is always cleaned up.
    """
    temp_dir = tempfile.mkdtemp('-export', 'pip-')
    self.unpack(temp_dir)
    if os.path.exists(location):
        # Remove the target so Bazaar can export into it correctly.
        rmtree(location)
    try:
        call_subprocess([self.cmd, 'export', location],
                        cwd=temp_dir, filter_stdout=self._filter,
                        show_stdout=False)
    finally:
        rmtree(temp_dir)
[ "def", "export", "(", "self", ",", "location", ")", ":", "temp_dir", "=", "tempfile", ".", "mkdtemp", "(", "'-export'", ",", "'pip-'", ")", "self", ".", "unpack", "(", "temp_dir", ")", "if", "os", ".", "path", ".", "exists", "(", "location", ")", ":"...
Export the Bazaar repository at the url to the destination location
[ "Export", "the", "Bazaar", "repository", "at", "the", "url", "to", "the", "destination", "location" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/bazaar.py#L33-L44
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce.verify
def verify(self, email):
    """
    Verify a single email address.
    :param str email: Email address to verify.
    :return: A VerifiedEmail object.
    """
    response = self._call(endpoint='single', data={'email': email})
    return VerifiedEmail(email, response['result'])
[ "def", "verify", "(", "self", ",", "email", ")", ":", "resp", "=", "self", ".", "_call", "(", "endpoint", "=", "'single'", ",", "data", "=", "{", "'email'", ":", "email", "}", ")", "return", "VerifiedEmail", "(", "email", ",", "resp", "[", "'result'"...
Verify a single email address. :param str email: Email address to verify. :return: A VerifiedEmail object.
[ "Verify", "a", "single", "email", "address", ".", ":", "param", "str", "email", ":", "Email", "address", "to", "verify", ".", ":", "return", ":", "A", "VerifiedEmail", "object", "." ]
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L18-L25
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce.create_job
def create_job(self, emails): """ Create a new bulk verification job for the list of emails. :param list emails: Email addresses to verify. :return: A Job object. """ resp = self._call(endpoint='bulk', data={'input_location': '1', 'input': '\n'.join(emails)}) return Job(resp['job_id'])
python
def create_job(self, emails): """ Create a new bulk verification job for the list of emails. :param list emails: Email addresses to verify. :return: A Job object. """ resp = self._call(endpoint='bulk', data={'input_location': '1', 'input': '\n'.join(emails)}) return Job(resp['job_id'])
[ "def", "create_job", "(", "self", ",", "emails", ")", ":", "resp", "=", "self", ".", "_call", "(", "endpoint", "=", "'bulk'", ",", "data", "=", "{", "'input_location'", ":", "'1'", ",", "'input'", ":", "'\\n'", ".", "join", "(", "emails", ")", "}", ...
Create a new bulk verification job for the list of emails. :param list emails: Email addresses to verify. :return: A Job object.
[ "Create", "a", "new", "bulk", "verification", "job", "for", "the", "list", "of", "emails", ".", ":", "param", "list", "emails", ":", "Email", "addresses", "to", "verify", ".", ":", "return", ":", "A", "Job", "object", "." ]
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L27-L34
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce.check_job
def check_job(self, job_id): """ Check the status of a bulk verification job. :param int job_id: ID of a job to check the status of. :return: A JobStatus object. """ resp = self._call(endpoint='status', data={'job_id': job_id}) map = {'id': 'job_id', 'status': 'status_code', 'type': 'type_code'} job_status_args = {map.get(k, k): v for k, v in resp.items()} return JobStatus(**job_status_args)
python
def check_job(self, job_id): """ Check the status of a bulk verification job. :param int job_id: ID of a job to check the status of. :return: A JobStatus object. """ resp = self._call(endpoint='status', data={'job_id': job_id}) map = {'id': 'job_id', 'status': 'status_code', 'type': 'type_code'} job_status_args = {map.get(k, k): v for k, v in resp.items()} return JobStatus(**job_status_args)
[ "def", "check_job", "(", "self", ",", "job_id", ")", ":", "resp", "=", "self", ".", "_call", "(", "endpoint", "=", "'status'", ",", "data", "=", "{", "'job_id'", ":", "job_id", "}", ")", "map", "=", "{", "'id'", ":", "'job_id'", ",", "'status'", ":...
Check the status of a bulk verification job. :param int job_id: ID of a job to check the status of. :return: A JobStatus object.
[ "Check", "the", "status", "of", "a", "bulk", "verification", "job", ".", ":", "param", "int", "job_id", ":", "ID", "of", "a", "job", "to", "check", "the", "status", "of", ".", ":", "return", ":", "A", "JobStatus", "object", "." ]
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L36-L45
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce.results
def results(self, job_id): """ Yield the result of a completed bulk verification job. :param int job_id: ID of a job to retrieve the results for. :yields: The next VerifiedEmail objects. """ resp = self._call(endpoint='download', data={'job_id': job_id}) Row = namedtuple('Row', ['email', 'result_text_code']) for line in resp: row = Row(*line.decode('utf-8').split(',')) yield VerifiedEmail.from_text_code(row.email, row.result_text_code)
python
def results(self, job_id): """ Yield the result of a completed bulk verification job. :param int job_id: ID of a job to retrieve the results for. :yields: The next VerifiedEmail objects. """ resp = self._call(endpoint='download', data={'job_id': job_id}) Row = namedtuple('Row', ['email', 'result_text_code']) for line in resp: row = Row(*line.decode('utf-8').split(',')) yield VerifiedEmail.from_text_code(row.email, row.result_text_code)
[ "def", "results", "(", "self", ",", "job_id", ")", ":", "resp", "=", "self", ".", "_call", "(", "endpoint", "=", "'download'", ",", "data", "=", "{", "'job_id'", ":", "job_id", "}", ")", "Row", "=", "namedtuple", "(", "'Row'", ",", "[", "'email'", ...
Yield the result of a completed bulk verification job. :param int job_id: ID of a job to retrieve the results for. :yields: The next VerifiedEmail objects.
[ "Yield", "the", "result", "of", "a", "completed", "bulk", "verification", "job", ".", ":", "param", "int", "job_id", ":", "ID", "of", "a", "job", "to", "retrieve", "the", "results", "for", ".", ":", "yields", ":", "The", "next", "VerifiedEmail", "objects...
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L47-L57
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce.retrieve_job
def retrieve_job(self, job_id): """ Result of a completed bulk verification job. :param int job_id: ID of a job to retrieve the results for. :return: A list of VerifiedEmail objects. """ warnings.warn('Use results generator method instead of retrieve_job which returns a list', UserWarning) return list(self.results(job_id))
python
def retrieve_job(self, job_id): """ Result of a completed bulk verification job. :param int job_id: ID of a job to retrieve the results for. :return: A list of VerifiedEmail objects. """ warnings.warn('Use results generator method instead of retrieve_job which returns a list', UserWarning) return list(self.results(job_id))
[ "def", "retrieve_job", "(", "self", ",", "job_id", ")", ":", "warnings", ".", "warn", "(", "'Use results generator method instead of retrieve_job which returns a list'", ",", "UserWarning", ")", "return", "list", "(", "self", ".", "results", "(", "job_id", ")", ")" ...
Result of a completed bulk verification job. :param int job_id: ID of a job to retrieve the results for. :return: A list of VerifiedEmail objects.
[ "Result", "of", "a", "completed", "bulk", "verification", "job", ".", ":", "param", "int", "job_id", ":", "ID", "of", "a", "job", "to", "retrieve", "the", "results", "for", ".", ":", "return", ":", "A", "list", "of", "VerifiedEmail", "objects", "." ]
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L59-L66
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce.account
def account(self): """ Get the API account details like balance of credits. :return: An Account object. """ resp = self._call(endpoint='account') return Account(resp['credits'], resp['jobs_completed'], resp['jobs_processing'])
python
def account(self): """ Get the API account details like balance of credits. :return: An Account object. """ resp = self._call(endpoint='account') return Account(resp['credits'], resp['jobs_completed'], resp['jobs_processing'])
[ "def", "account", "(", "self", ")", ":", "resp", "=", "self", ".", "_call", "(", "endpoint", "=", "'account'", ")", "return", "Account", "(", "resp", "[", "'credits'", "]", ",", "resp", "[", "'jobs_completed'", "]", ",", "resp", "[", "'jobs_processing'",...
Get the API account details like balance of credits. :return: An Account object.
[ "Get", "the", "API", "account", "details", "like", "balance", "of", "credits", ".", ":", "return", ":", "An", "Account", "object", "." ]
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L68-L74
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce.access_token
def access_token(self): """ Retrieve and cache an access token to authenticate API calls. :return: An access token string. """ if self._cached_access_token is not None: return self._cached_access_token resp = self._request(endpoint='access_token', data={'grant_type': 'client_credentials', 'scope': 'basic user'}, auth=(self.api_username, self.api_key)) self._cached_access_token = resp['access_token'] return self._cached_access_token
python
def access_token(self): """ Retrieve and cache an access token to authenticate API calls. :return: An access token string. """ if self._cached_access_token is not None: return self._cached_access_token resp = self._request(endpoint='access_token', data={'grant_type': 'client_credentials', 'scope': 'basic user'}, auth=(self.api_username, self.api_key)) self._cached_access_token = resp['access_token'] return self._cached_access_token
[ "def", "access_token", "(", "self", ")", ":", "if", "self", ".", "_cached_access_token", "is", "not", "None", ":", "return", "self", ".", "_cached_access_token", "resp", "=", "self", ".", "_request", "(", "endpoint", "=", "'access_token'", ",", "data", "=", ...
Retrieve and cache an access token to authenticate API calls. :return: An access token string.
[ "Retrieve", "and", "cache", "an", "access", "token", "to", "authenticate", "API", "calls", ".", ":", "return", ":", "An", "access", "token", "string", "." ]
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L80-L90
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce._call
def _call(self, endpoint, data=None): """ Make an authorized API call to specified endpoint. :param str endpoint: API endpoint's relative URL, eg. `/account`. :param dict data: POST request data. :return: A dictionary or a string with response data. """ data = {} if data is None else data try: data['access_token'] = self.access_token() return self._request(endpoint, data) except AccessTokenExpired: self._cached_access_token = None data['access_token'] = self.access_token() return self._request(endpoint, data)
python
def _call(self, endpoint, data=None): """ Make an authorized API call to specified endpoint. :param str endpoint: API endpoint's relative URL, eg. `/account`. :param dict data: POST request data. :return: A dictionary or a string with response data. """ data = {} if data is None else data try: data['access_token'] = self.access_token() return self._request(endpoint, data) except AccessTokenExpired: self._cached_access_token = None data['access_token'] = self.access_token() return self._request(endpoint, data)
[ "def", "_call", "(", "self", ",", "endpoint", ",", "data", "=", "None", ")", ":", "data", "=", "{", "}", "if", "data", "is", "None", "else", "data", "try", ":", "data", "[", "'access_token'", "]", "=", "self", ".", "access_token", "(", ")", "return...
Make an authorized API call to specified endpoint. :param str endpoint: API endpoint's relative URL, eg. `/account`. :param dict data: POST request data. :return: A dictionary or a string with response data.
[ "Make", "an", "authorized", "API", "call", "to", "specified", "endpoint", ".", ":", "param", "str", "endpoint", ":", "API", "endpoint", "s", "relative", "URL", "eg", ".", "/", "account", ".", ":", "param", "dict", "data", ":", "POST", "request", "data", ...
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L96-L110
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce._request
def _request(self, endpoint, data, auth=None): """ Make HTTP POST request to an API endpoint. :param str endpoint: API endpoint's relative URL, eg. `/account`. :param dict data: POST request data. :param tuple auth: HTTP basic auth credentials. :return: A dictionary or a string with response data. """ url = '{}/{}'.format(self.base_url, endpoint) response = requests.post(url, data, auth=auth) return self._handle_response(response)
python
def _request(self, endpoint, data, auth=None): """ Make HTTP POST request to an API endpoint. :param str endpoint: API endpoint's relative URL, eg. `/account`. :param dict data: POST request data. :param tuple auth: HTTP basic auth credentials. :return: A dictionary or a string with response data. """ url = '{}/{}'.format(self.base_url, endpoint) response = requests.post(url, data, auth=auth) return self._handle_response(response)
[ "def", "_request", "(", "self", ",", "endpoint", ",", "data", ",", "auth", "=", "None", ")", ":", "url", "=", "'{}/{}'", ".", "format", "(", "self", ".", "base_url", ",", "endpoint", ")", "response", "=", "requests", ".", "post", "(", "url", ",", "...
Make HTTP POST request to an API endpoint. :param str endpoint: API endpoint's relative URL, eg. `/account`. :param dict data: POST request data. :param tuple auth: HTTP basic auth credentials. :return: A dictionary or a string with response data.
[ "Make", "HTTP", "POST", "request", "to", "an", "API", "endpoint", ".", ":", "param", "str", "endpoint", ":", "API", "endpoint", "s", "relative", "URL", "eg", ".", "/", "account", ".", ":", "param", "dict", "data", ":", "POST", "request", "data", ".", ...
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L112-L122
martinkosir/neverbounce-python
neverbounce/client.py
NeverBounce._handle_response
def _handle_response(response): """ Handle the response and possible failures. :param Response response: Response data. :return: A dictionary or a string with response data. :raises: NeverBounceAPIError if the API call fails. """ if not response.ok: raise NeverBounceAPIError(response) if response.headers.get('Content-Type') == 'application/octet-stream': return response.iter_lines() try: resp = response.json() except ValueError: raise InvalidResponseError('Failed to handle the response content-type {}.'.format( response.headers.get('Content-Type')) ) if 'success' in resp and not resp['success']: if 'msg' in resp and resp['msg'] == 'Authentication failed': raise AccessTokenExpired else: raise NeverBounceAPIError(response) return resp
python
def _handle_response(response): """ Handle the response and possible failures. :param Response response: Response data. :return: A dictionary or a string with response data. :raises: NeverBounceAPIError if the API call fails. """ if not response.ok: raise NeverBounceAPIError(response) if response.headers.get('Content-Type') == 'application/octet-stream': return response.iter_lines() try: resp = response.json() except ValueError: raise InvalidResponseError('Failed to handle the response content-type {}.'.format( response.headers.get('Content-Type')) ) if 'success' in resp and not resp['success']: if 'msg' in resp and resp['msg'] == 'Authentication failed': raise AccessTokenExpired else: raise NeverBounceAPIError(response) return resp
[ "def", "_handle_response", "(", "response", ")", ":", "if", "not", "response", ".", "ok", ":", "raise", "NeverBounceAPIError", "(", "response", ")", "if", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ")", "==", "'application/octet-stream'", "...
Handle the response and possible failures. :param Response response: Response data. :return: A dictionary or a string with response data. :raises: NeverBounceAPIError if the API call fails.
[ "Handle", "the", "response", "and", "possible", "failures", ".", ":", "param", "Response", "response", ":", "Response", "data", ".", ":", "return", ":", "A", "dictionary", "or", "a", "string", "with", "response", "data", ".", ":", "raises", ":", "NeverBoun...
train
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L125-L148
ohenrik/tabs
tabs/tables.py
post_process
def post_process(table, post_processors): """Applies the list of post processing methods if any""" table_result = table for processor in post_processors: table_result = processor(table_result) return table_result
python
def post_process(table, post_processors): """Applies the list of post processing methods if any""" table_result = table for processor in post_processors: table_result = processor(table_result) return table_result
[ "def", "post_process", "(", "table", ",", "post_processors", ")", ":", "table_result", "=", "table", "for", "processor", "in", "post_processors", ":", "table_result", "=", "processor", "(", "table_result", ")", "return", "table_result" ]
Applies the list of post processing methods if any
[ "Applies", "the", "list", "of", "post", "processing", "methods", "if", "any" ]
train
https://github.com/ohenrik/tabs/blob/039ced6c5612ecdd551aeaac63789862aba05711/tabs/tables.py#L8-L13
ohenrik/tabs
tabs/tables.py
describe
def describe(cls, full=False): """Prints a description of the table based on the provided documentation and post processors""" divider_double = "=" * 80 divider_single = "-" * 80 description = cls.__doc__ message = [] message.append(divider_double) message.append(cls.__name__ + ':') message.append(description) if full and cls.post_processors(cls): message.append(divider_single) message.append("Post processors:") message.append(divider_single) for processor in cls.post_processors(cls): message.append(">" + " " * 3 + processor.__name__ + ':') message.append(" " * 4 + processor.__doc__) message.append('') message.append(divider_double) message.append('') for line in message: print(line)
python
def describe(cls, full=False): """Prints a description of the table based on the provided documentation and post processors""" divider_double = "=" * 80 divider_single = "-" * 80 description = cls.__doc__ message = [] message.append(divider_double) message.append(cls.__name__ + ':') message.append(description) if full and cls.post_processors(cls): message.append(divider_single) message.append("Post processors:") message.append(divider_single) for processor in cls.post_processors(cls): message.append(">" + " " * 3 + processor.__name__ + ':') message.append(" " * 4 + processor.__doc__) message.append('') message.append(divider_double) message.append('') for line in message: print(line)
[ "def", "describe", "(", "cls", ",", "full", "=", "False", ")", ":", "divider_double", "=", "\"=\"", "*", "80", "divider_single", "=", "\"-\"", "*", "80", "description", "=", "cls", ".", "__doc__", "message", "=", "[", "]", "message", ".", "append", "("...
Prints a description of the table based on the provided documentation and post processors
[ "Prints", "a", "description", "of", "the", "table", "based", "on", "the", "provided", "documentation", "and", "post", "processors" ]
train
https://github.com/ohenrik/tabs/blob/039ced6c5612ecdd551aeaac63789862aba05711/tabs/tables.py#L15-L36
ohenrik/tabs
tabs/tables.py
BaseTableABC.describe_processors
def describe_processors(cls): """List all postprocessors and their description""" # TODO: Add dependencies to this dictionary for processor in cls.post_processors(cls): yield {'name': processor.__name__, 'description': processor.__doc__, 'processor': processor}
python
def describe_processors(cls): """List all postprocessors and their description""" # TODO: Add dependencies to this dictionary for processor in cls.post_processors(cls): yield {'name': processor.__name__, 'description': processor.__doc__, 'processor': processor}
[ "def", "describe_processors", "(", "cls", ")", ":", "# TODO: Add dependencies to this dictionary", "for", "processor", "in", "cls", ".", "post_processors", "(", "cls", ")", ":", "yield", "{", "'name'", ":", "processor", ".", "__name__", ",", "'description'", ":", ...
List all postprocessors and their description
[ "List", "all", "postprocessors", "and", "their", "description" ]
train
https://github.com/ohenrik/tabs/blob/039ced6c5612ecdd551aeaac63789862aba05711/tabs/tables.py#L68-L74
ohenrik/tabs
tabs/tables.py
BaseTableABC.dependencies
def dependencies(cls): """Returns a list of all dependent tables, in the order they are defined. Add new dependencies for source and every post proecssor like this:: source.dependencies = [PersonalData] some_post_processor.dependencies = [SomeOtherTable, AnotherTable] `some_post_processor.dependencies` needs to be placed after `some_post_processor` is defined. """ dependencies = [] try: dependencies += cls.source.dependencies except AttributeError: pass for processor in cls.post_processors(cls): try: assert isinstance(processor.dependencies, list), \ "{}.dependencies must be a list".format(processor.__name__) dependencies += processor.dependencies except AttributeError: pass return dependencies
python
def dependencies(cls): """Returns a list of all dependent tables, in the order they are defined. Add new dependencies for source and every post proecssor like this:: source.dependencies = [PersonalData] some_post_processor.dependencies = [SomeOtherTable, AnotherTable] `some_post_processor.dependencies` needs to be placed after `some_post_processor` is defined. """ dependencies = [] try: dependencies += cls.source.dependencies except AttributeError: pass for processor in cls.post_processors(cls): try: assert isinstance(processor.dependencies, list), \ "{}.dependencies must be a list".format(processor.__name__) dependencies += processor.dependencies except AttributeError: pass return dependencies
[ "def", "dependencies", "(", "cls", ")", ":", "dependencies", "=", "[", "]", "try", ":", "dependencies", "+=", "cls", ".", "source", ".", "dependencies", "except", "AttributeError", ":", "pass", "for", "processor", "in", "cls", ".", "post_processors", "(", ...
Returns a list of all dependent tables, in the order they are defined. Add new dependencies for source and every post proecssor like this:: source.dependencies = [PersonalData] some_post_processor.dependencies = [SomeOtherTable, AnotherTable] `some_post_processor.dependencies` needs to be placed after `some_post_processor` is defined.
[ "Returns", "a", "list", "of", "all", "dependent", "tables", "in", "the", "order", "they", "are", "defined", "." ]
train
https://github.com/ohenrik/tabs/blob/039ced6c5612ecdd551aeaac63789862aba05711/tabs/tables.py#L87-L111
ohenrik/tabs
tabs/tables.py
BaseTableABC.get_settings_list
def get_settings_list(self): """The settings list used for building the cache id.""" return [ self.source, self.output, self.kwargs, self.post_processors, ]
python
def get_settings_list(self): """The settings list used for building the cache id.""" return [ self.source, self.output, self.kwargs, self.post_processors, ]
[ "def", "get_settings_list", "(", "self", ")", ":", "return", "[", "self", ".", "source", ",", "self", ".", "output", ",", "self", ".", "kwargs", ",", "self", ".", "post_processors", ",", "]" ]
The settings list used for building the cache id.
[ "The", "settings", "list", "used", "for", "building", "the", "cache", "id", "." ]
train
https://github.com/ohenrik/tabs/blob/039ced6c5612ecdd551aeaac63789862aba05711/tabs/tables.py#L117-L124
ohenrik/tabs
tabs/tables.py
BaseTableABC.get_hash
def get_hash(self): """Retruns a hash based on the the current table code and kwargs. Also changes based on dependent tables.""" depencency_hashes = [dep.get_hash() for dep in self.dep()] sl = inspect.getsourcelines hash_sources = [sl(self.__class__), self.args, self.kwargs, *depencency_hashes] hash_input = pickle.dumps(hash_sources) return hashlib.md5(hash_input).hexdigest()
python
def get_hash(self): """Retruns a hash based on the the current table code and kwargs. Also changes based on dependent tables.""" depencency_hashes = [dep.get_hash() for dep in self.dep()] sl = inspect.getsourcelines hash_sources = [sl(self.__class__), self.args, self.kwargs, *depencency_hashes] hash_input = pickle.dumps(hash_sources) return hashlib.md5(hash_input).hexdigest()
[ "def", "get_hash", "(", "self", ")", ":", "depencency_hashes", "=", "[", "dep", ".", "get_hash", "(", ")", "for", "dep", "in", "self", ".", "dep", "(", ")", "]", "sl", "=", "inspect", ".", "getsourcelines", "hash_sources", "=", "[", "sl", "(", "self"...
Retruns a hash based on the the current table code and kwargs. Also changes based on dependent tables.
[ "Retruns", "a", "hash", "based", "on", "the", "the", "current", "table", "code", "and", "kwargs", ".", "Also", "changes", "based", "on", "dependent", "tables", "." ]
train
https://github.com/ohenrik/tabs/blob/039ced6c5612ecdd551aeaac63789862aba05711/tabs/tables.py#L126-L134
ohenrik/tabs
tabs/tables.py
BaseTableABC.get_cached_filename
def get_cached_filename(self, filename, extention, settings_list=None): """Creates a filename with md5 cache string based on settings list Args: filename (str): the filename without extention extention (str): the file extention without dot. (i.e. 'pkl') settings_list (dict|list): the settings list as list (optional) NB! The dictionaries have to be sorted or hash id will change arbitrarely. """ cached_name = "_".join([filename, self.get_hash()]) return ".".join([cached_name, extention])
python
def get_cached_filename(self, filename, extention, settings_list=None): """Creates a filename with md5 cache string based on settings list Args: filename (str): the filename without extention extention (str): the file extention without dot. (i.e. 'pkl') settings_list (dict|list): the settings list as list (optional) NB! The dictionaries have to be sorted or hash id will change arbitrarely. """ cached_name = "_".join([filename, self.get_hash()]) return ".".join([cached_name, extention])
[ "def", "get_cached_filename", "(", "self", ",", "filename", ",", "extention", ",", "settings_list", "=", "None", ")", ":", "cached_name", "=", "\"_\"", ".", "join", "(", "[", "filename", ",", "self", ".", "get_hash", "(", ")", "]", ")", "return", "\".\""...
Creates a filename with md5 cache string based on settings list Args: filename (str): the filename without extention extention (str): the file extention without dot. (i.e. 'pkl') settings_list (dict|list): the settings list as list (optional) NB! The dictionaries have to be sorted or hash id will change arbitrarely.
[ "Creates", "a", "filename", "with", "md5", "cache", "string", "based", "on", "settings", "list" ]
train
https://github.com/ohenrik/tabs/blob/039ced6c5612ecdd551aeaac63789862aba05711/tabs/tables.py#L136-L147