repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
appstore-zencore/daemon-application
src/daemon_application/base.py
daemon_stop
def daemon_stop(pidfile, sig=None):
    """Stop a running daemon application.

    Loads the pid recorded in ``pidfile`` and signals that process via
    ``process_kill`` (with ``sig``, or its default signal when ``sig`` is
    ``None``). If no pid can be loaded, prints a message to stderr and
    exits the current process with status 195.

    Returns the pid that was signalled.
    """
    logger.debug("stop daemon application pidfile={pidfile}.".format(pidfile=pidfile))
    pid = load_pid(pidfile)
    logger.debug("load pid={pid}".format(pid=pid))
    if pid:
        process_kill(pid, sig)
        return pid
    # No pid on file: nothing to signal — report and bail out.
    six.print_("Application is not running or crashed...", file=os.sys.stderr)
    os.sys.exit(195)
python
def daemon_stop(pidfile, sig=None): """Stop application. """ logger.debug("stop daemon application pidfile={pidfile}.".format(pidfile=pidfile)) pid = load_pid(pidfile) logger.debug("load pid={pid}".format(pid=pid)) if not pid: six.print_("Application is not running or crashed...", file=os.sys.stderr) os.sys.exit(195) process_kill(pid, sig) return pid
[ "def", "daemon_stop", "(", "pidfile", ",", "sig", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"stop daemon application pidfile={pidfile}.\"", ".", "format", "(", "pidfile", "=", "pidfile", ")", ")", "pid", "=", "load_pid", "(", "pidfile", ")", "log...
Stop application.
[ "Stop", "application", "." ]
train
https://github.com/appstore-zencore/daemon-application/blob/e8d716dbaa7becfda95e144cce51558b0c9615e5/src/daemon_application/base.py#L141-L151
azraq27/neural
neural/dicom.py
is_dicom
def is_dicom(filename):
    '''Return ``True`` if ``filename`` carries the DICOM magic number.

    DICOM part-10 files begin with a 128-byte preamble followed by the
    4-byte magic string ``DICM``. Returns ``False`` for files that are
    missing, unreadable, or too short to contain the magic number.
    '''
    try:
        # Open in binary mode: the preamble is arbitrary bytes and must not
        # be decoded as text (text mode raised UnicodeDecodeError on many
        # real DICOMs and the str comparison never matched bytes on Py3).
        with open(filename, 'rb') as f:
            header = f.read(132)
        return header[128:132] == b"DICM"
    except (IOError, OSError):
        # Missing or unreadable file simply is not a DICOM.
        return False
python
def is_dicom(filename): '''returns Boolean of whether the given file has the DICOM magic number''' try: with open(filename) as f: d = f.read(132) return d[128:132]=="DICM" except: return False
[ "def", "is_dicom", "(", "filename", ")", ":", "try", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "d", "=", "f", ".", "read", "(", "132", ")", "return", "d", "[", "128", ":", "132", "]", "==", "\"DICM\"", "except", ":", "return", ...
returns Boolean of whether the given file has the DICOM magic number
[ "returns", "Boolean", "of", "whether", "the", "given", "file", "has", "the", "DICOM", "magic", "number" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L13-L20
azraq27/neural
neural/dicom.py
info
def info(filename):
    '''returns a DicomInfo object containing the header information in ``filename``

    Shells out to the external ``dicom_hdr`` tool (path in ``_dicom_hdr``,
    presumably AFNI's dicom_hdr — TODO confirm) twice: once with
    ``-sexinfo`` for the full header dump and once with ``-slice_times``
    for slice timing. Returns ``None`` if the first call fails.
    '''
    try:
        out = subprocess.check_output([_dicom_hdr,'-sexinfo',filename])
    except subprocess.CalledProcessError:
        # dicom_hdr could not parse the file at all
        return None
    slice_timing_out = subprocess.check_output([_dicom_hdr,'-slice_times',filename])
    # NOTE(review): [5:] skips what looks like a fixed-length preamble in the
    # -slice_times output — confirm against dicom_hdr's output format.
    slice_timing = [float(x) for x in slice_timing_out.strip().split()[5:]]
    frames = []
    # Each header element line looks like:
    #   GGGG EEEE  size [offset ] //label//value
    # where GGGG/EEEE are the hex group/element of the DICOM address.
    for frame in re.findall(r'^(\w{4}) (\w{4})\s+(\d+) \[(\d+)\s+\] \/\/(.*?)\/\/(.*?)$',out,re.M):
        new_frame = {}
        new_frame['addr'] = (int(frame[0],16),int(frame[1],16))
        new_frame['size'] = int(frame[2])
        new_frame['offset'] = int(frame[3])
        new_frame['label'] = frame[4].strip()
        new_frame['value'] = frame[5].strip()
        frames.append(new_frame)
    # The "sexinfo" section is a simple "key = value" listing.
    sex_info = {}
    for i in re.findall(r'^(.*?)\s+= (.*)$',out,re.M):
        sex_info[i[0]] = i[1]
    return DicomInfo(frames,sex_info,slice_timing)
python
def info(filename): '''returns a DicomInfo object containing the header information in ``filename``''' try: out = subprocess.check_output([_dicom_hdr,'-sexinfo',filename]) except subprocess.CalledProcessError: return None slice_timing_out = subprocess.check_output([_dicom_hdr,'-slice_times',filename]) slice_timing = [float(x) for x in slice_timing_out.strip().split()[5:]] frames = [] for frame in re.findall(r'^(\w{4}) (\w{4})\s+(\d+) \[(\d+)\s+\] \/\/(.*?)\/\/(.*?)$',out,re.M): new_frame = {} new_frame['addr'] = (int(frame[0],16),int(frame[1],16)) new_frame['size'] = int(frame[2]) new_frame['offset'] = int(frame[3]) new_frame['label'] = frame[4].strip() new_frame['value'] = frame[5].strip() frames.append(new_frame) sex_info = {} for i in re.findall(r'^(.*?)\s+= (.*)$',out,re.M): sex_info[i[0]] = i[1] return DicomInfo(frames,sex_info,slice_timing)
[ "def", "info", "(", "filename", ")", ":", "try", ":", "out", "=", "subprocess", ".", "check_output", "(", "[", "_dicom_hdr", ",", "'-sexinfo'", ",", "filename", "]", ")", "except", "subprocess", ".", "CalledProcessError", ":", "return", "None", "slice_timing...
returns a DicomInfo object containing the header information in ``filename``
[ "returns", "a", "DicomInfo", "object", "containing", "the", "header", "information", "in", "filename" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L62-L83
azraq27/neural
neural/dicom.py
info_for_tags
def info_for_tags(filename, tags):
    '''return a dictionary for the given ``tags`` in the header of the DICOM file ``filename``

    ``tags`` is expected to be a list of tuples that contains the DICOM
    address in hex values. basically a rewrite of :meth:`info` because
    it's so slow. This is a lot faster and more reliable.

    Tags not present in the pydicom header fall back to the slower
    :meth:`info` lookup; tags that cannot be resolved at all are simply
    omitted from the returned dictionary.
    '''
    if isinstance(tags, tuple):
        # Allow a single (group, element) tuple as shorthand for a 1-item list
        tags = [tags]
    d = pydicom.read_file(filename)
    return_dict = {}
    dicom_info = None
    info_attempted = False
    for k in tags:
        if k in d:
            return_dict[k] = d[k].value
        else:
            # Backup to the old method (only shell out to dicom_hdr once)
            if not info_attempted:
                dicom_info = info(filename)
                info_attempted = True
            if dicom_info is None:
                # dicom_hdr failed entirely; previously this crashed with
                # AttributeError on None.addr(k) — skip the tag instead.
                continue
            i = dicom_info.addr(k)
            if i:
                return_dict[k] = nl.numberize(i['value'])
    return return_dict
python
def info_for_tags(filename,tags): '''return a dictionary for the given ``tags`` in the header of the DICOM file ``filename`` ``tags`` is expected to be a list of tuples that contains the DICOM address in hex values. basically a rewrite of :meth:`info` because it's so slow. This is a lot faster and more reliable''' if isinstance(tags,tuple): tags = [tags] d = pydicom.read_file(filename) return_dict = {} dicom_info = None for k in tags: if k in d: return_dict[k] = d[k].value else: # Backup to the old method if dicom_info==None: dicom_info = info(filename) i = dicom_info.addr(k) if i: return_dict[k] = nl.numberize(i['value']) return return_dict
[ "def", "info_for_tags", "(", "filename", ",", "tags", ")", ":", "if", "isinstance", "(", "tags", ",", "tuple", ")", ":", "tags", "=", "[", "tags", "]", "d", "=", "pydicom", ".", "read_file", "(", "filename", ")", "return_dict", "=", "{", "}", "dicom_...
return a dictionary for the given ``tags`` in the header of the DICOM file ``filename`` ``tags`` is expected to be a list of tuples that contains the DICOM address in hex values. basically a rewrite of :meth:`info` because it's so slow. This is a lot faster and more reliable
[ "return", "a", "dictionary", "for", "the", "given", "tags", "in", "the", "header", "of", "the", "DICOM", "file", "filename" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L85-L106
azraq27/neural
neural/dicom.py
scan_dir
def scan_dir(dirname, tags=None, md5_hash=False):
    '''scans a directory tree and returns a dictionary with files and key DICOM tags

    return value is a dictionary with absolute filenames as keys and
    dictionaries of tags/values as values

    the param ``tags`` is the list of DICOM tags (given as tuples of hex
    numbers) that will be obtained for each file. If not given, the
    default list is:

    :0008 0021:     Series date
    :0008 0031:     Series time
    :0008 103E:     Series description
    :0008 0080:     Institution name
    :0010 0020:     Patient ID
    :0028 0010:     Image rows
    :0028 0011:     Image columns

    If the param ``md5_hash`` is ``True``, this will also return the MD5
    hash of the file. This is useful for detecting duplicate files
    '''
    if tags is None:  # identity test, not ==, per PEP 8
        tags = [
            (0x0008, 0x0021),
            (0x0008, 0x0031),
            (0x0008, 0x103E),
            (0x0008, 0x0080),
            (0x0010, 0x0020),
            (0x0028, 0x0010),
            (0x0028, 0x0011),
        ]
    return_dict = {}
    # Walk the whole tree; anything without the DICOM magic number is skipped
    for root, dirs, files in os.walk(dirname):
        for filename in files:
            fullname = os.path.join(root, filename)
            if is_dicom(fullname):
                return_dict[fullname] = info_for_tags(fullname, tags)
                if md5_hash:
                    return_dict[fullname]['md5'] = nl.hash(fullname)
    return return_dict
python
def scan_dir(dirname,tags=None,md5_hash=False): '''scans a directory tree and returns a dictionary with files and key DICOM tags return value is a dictionary absolute filenames as keys and with dictionaries of tags/values as values the param ``tags`` is the list of DICOM tags (given as tuples of hex numbers) that will be obtained for each file. If not given, the default list is: :0008 0021: Series date :0008 0031: Series time :0008 103E: Series description :0008 0080: Institution name :0010 0020: Patient ID :0028 0010: Image rows :0028 0011: Image columns If the param ``md5_hash`` is ``True``, this will also return the MD5 hash of the file. This is useful for detecting duplicate files ''' if tags==None: tags = [ (0x0008, 0x0021), (0x0008, 0x0031), (0x0008, 0x103E), (0x0008, 0x0080), (0x0010, 0x0020), (0x0028, 0x0010), (0x0028, 0x0011), ] return_dict = {} for root,dirs,files in os.walk(dirname): for filename in files: fullname = os.path.join(root,filename) if is_dicom(fullname): return_dict[fullname] = info_for_tags(fullname,tags) if md5_hash: return_dict[fullname]['md5'] = nl.hash(fullname) return return_dict
[ "def", "scan_dir", "(", "dirname", ",", "tags", "=", "None", ",", "md5_hash", "=", "False", ")", ":", "if", "tags", "==", "None", ":", "tags", "=", "[", "(", "0x0008", ",", "0x0021", ")", ",", "(", "0x0008", ",", "0x0031", ")", ",", "(", "0x0008"...
scans a directory tree and returns a dictionary with files and key DICOM tags return value is a dictionary absolute filenames as keys and with dictionaries of tags/values as values the param ``tags`` is the list of DICOM tags (given as tuples of hex numbers) that will be obtained for each file. If not given, the default list is: :0008 0021: Series date :0008 0031: Series time :0008 103E: Series description :0008 0080: Institution name :0010 0020: Patient ID :0028 0010: Image rows :0028 0011: Image columns If the param ``md5_hash`` is ``True``, this will also return the MD5 hash of the file. This is useful for detecting duplicate files
[ "scans", "a", "directory", "tree", "and", "returns", "a", "dictionary", "with", "files", "and", "key", "DICOM", "tags" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L108-L149
azraq27/neural
neural/dicom.py
find_dups
def find_dups(file_dict):
    '''takes output from :meth:`scan_dir` and returns list of duplicate files

    Groups filenames by their ``md5`` entry and returns the groups that
    contain two or more files.
    '''
    # Group filenames by MD5 hash
    by_hash = {}
    for fname in file_dict:
        by_hash.setdefault(file_dict[fname]['md5'], []).append(fname)
    # Keep only hashes shared by at least two files
    dups = {}
    for digest in by_hash:
        if len(by_hash[digest]) >= 2:
            dups[digest] = by_hash[digest]
    return dups.values()
python
def find_dups(file_dict): '''takes output from :meth:`scan_dir` and returns list of duplicate files''' found_hashes = {} for f in file_dict: if file_dict[f]['md5'] not in found_hashes: found_hashes[file_dict[f]['md5']] = [] found_hashes[file_dict[f]['md5']].append(f) final_hashes = dict(found_hashes) for h in found_hashes: if len(found_hashes[h])<2: del(final_hashes[h]) return final_hashes.values()
[ "def", "find_dups", "(", "file_dict", ")", ":", "found_hashes", "=", "{", "}", "for", "f", "in", "file_dict", ":", "if", "file_dict", "[", "f", "]", "[", "'md5'", "]", "not", "in", "found_hashes", ":", "found_hashes", "[", "file_dict", "[", "f", "]", ...
takes output from :meth:`scan_dir` and returns list of duplicate files
[ "takes", "output", "from", ":", "meth", ":", "scan_dir", "and", "returns", "list", "of", "duplicate", "files" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L155-L166
azraq27/neural
neural/dicom.py
cluster_files
def cluster_files(file_dict):
    '''takes output from :meth:`scan_dir` and organizes into lists of files with the same tags

    returns a dictionary whose keys are tuples of the unique tag-value
    combination; each value is another dictionary with the keys ``info``
    (the original tag dict, minus any ``md5`` entry) and ``files`` (the
    list of files that match that combination)
    '''
    clusters = {}
    for fname in file_dict:
        # Copy the tag dict and drop the md5 entry — the hash should not
        # influence clustering.
        tag_info = dict(file_dict[fname])
        tag_info.pop('md5', None)
        # Sorted tag values form an order-independent cluster key
        key = tuple(sorted(file_dict[fname][t] for t in tag_info))
        if key not in clusters:
            clusters[key] = {'info': tag_info, 'files': []}
        clusters[key]['files'].append(fname)
    return clusters
python
def cluster_files(file_dict): '''takes output from :meth:`scan_dir` and organizes into lists of files with the same tags returns a dictionary where values are a tuple of the unique tag combination and values contain another dictionary with the keys ``info`` containing the original tag dict and ``files`` containing a list of files that match''' return_dict = {} for filename in file_dict: info_dict = dict(file_dict[filename]) if 'md5' in info_dict: del(info_dict['md5']) dict_key = tuple(sorted([file_dict[filename][x] for x in info_dict])) if dict_key not in return_dict: return_dict[dict_key] = {'info':info_dict,'files':[]} return_dict[dict_key]['files'].append(filename) return return_dict
[ "def", "cluster_files", "(", "file_dict", ")", ":", "return_dict", "=", "{", "}", "for", "filename", "in", "file_dict", ":", "info_dict", "=", "dict", "(", "file_dict", "[", "filename", "]", ")", "if", "'md5'", "in", "info_dict", ":", "del", "(", "info_d...
takes output from :meth:`scan_dir` and organizes into lists of files with the same tags returns a dictionary where values are a tuple of the unique tag combination and values contain another dictionary with the keys ``info`` containing the original tag dict and ``files`` containing a list of files that match
[ "takes", "output", "from", ":", "meth", ":", "scan_dir", "and", "organizes", "into", "lists", "of", "files", "with", "the", "same", "tags" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L168-L183
azraq27/neural
neural/dicom.py
max_diff
def max_diff(dset1,dset2):
    '''calculates maximal voxel-wise difference in datasets (in %)

    Useful for checking if datasets have the same data. For example, if
    the maximum difference is < 1.0%, they're probably the same dataset.
    Returns ``float('inf')`` when either file is missing, unreadable, or
    the voxel arrays cannot be compared (e.g. shape mismatch).
    '''
    for dset in [dset1,dset2]:
        if not os.path.exists(dset):
            nl.notify('Error: Could not find file: %s' % dset,level=nl.level.error)
            return float('inf')
    try:
        dset1_d = nib.load(dset1)
        dset2_d = nib.load(dset2)
        # NOTE(review): get_data() is deprecated in newer nibabel in favor
        # of get_fdata() — confirm the pinned nibabel version.
        dset1_data = dset1_d.get_data()
        dset2_data = dset2_d.get_data()
    except IOError:
        nl.notify('Error: Could not read files %s and %s' % (dset1,dset2),level=nl.level.error)
        return float('inf')
    try:
        # Silence divide-by-zero / 0/0 warnings: voxels where both datasets
        # are zero produce inf/nan, which masked_invalid then excludes.
        old_err = np.seterr(divide='ignore',invalid='ignore')
        # Percent difference relative to the voxel-wise mean of the two.
        max_val = 100*np.max(np.ma.masked_invalid(np.double(dset1_data - dset2_data) / ((dset1_data+dset2_data)/2)))
        # Restore the caller's numpy error state
        np.seterr(**old_err)
        return max_val
    except ValueError:
        return float('inf')
python
def max_diff(dset1,dset2): '''calculates maximal voxel-wise difference in datasets (in %) Useful for checking if datasets have the same data. For example, if the maximum difference is < 1.0%, they're probably the same dataset''' for dset in [dset1,dset2]: if not os.path.exists(dset): nl.notify('Error: Could not find file: %s' % dset,level=nl.level.error) return float('inf') try: dset1_d = nib.load(dset1) dset2_d = nib.load(dset2) dset1_data = dset1_d.get_data() dset2_data = dset2_d.get_data() except IOError: nl.notify('Error: Could not read files %s and %s' % (dset1,dset2),level=nl.level.error) return float('inf') try: old_err = np.seterr(divide='ignore',invalid='ignore') max_val = 100*np.max(np.ma.masked_invalid(np.double(dset1_data - dset2_data) / ((dset1_data+dset2_data)/2))) np.seterr(**old_err) return max_val except ValueError: return float('inf')
[ "def", "max_diff", "(", "dset1", ",", "dset2", ")", ":", "for", "dset", "in", "[", "dset1", ",", "dset2", "]", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "dset", ")", ":", "nl", ".", "notify", "(", "'Error: Could not find file: %s'", "%...
calculates maximal voxel-wise difference in datasets (in %) Useful for checking if datasets have the same data. For example, if the maximum difference is < 1.0%, they're probably the same dataset
[ "calculates", "maximal", "voxel", "-", "wise", "difference", "in", "datasets", "(", "in", "%", ")" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L185-L208
azraq27/neural
neural/dicom.py
create_dset_to3d
def create_dset_to3d(prefix,file_list,file_order='zt',num_slices=None,num_reps=None,TR=None,slice_order='alt+z',only_dicoms=True,sort_filenames=False):
    '''manually create dataset by specifying everything (not recommended, but necessary when autocreation fails)

    If `num_slices` or `num_reps` is omitted, it will be inferred by the
    number of images. If both are omitted, it assumes that this is not a
    time-dependent dataset.

    :only_dicoms:       filter the given list by readable DICOM images
    :sort_filenames:    sort the given files by filename using the right-most number in the filename

    Returns ``False`` on any failure; on success falls through (implicitly
    returning ``None``) after the external ``to3d`` call completes.
    '''
    # DICOM addresses for the header values we may need to infer
    tags = {
        'num_rows': (0x0028,0x0010),
        'num_reps': (0x0020,0x0105),
        'TR': (0x0018,0x0080)
    }
    with nl.notify('Trying to create dataset %s' % prefix):
        if os.path.exists(prefix):
            nl.notify('Error: file "%s" already exists!' % prefix,level=nl.level.error)
            return False
        # Read the needed tags from every candidate file; unreadable files
        # are silently dropped from tagvals
        tagvals = {}
        for f in file_list:
            try:
                tagvals[f] = info_for_tags(f,tags.values())
            except:
                pass
        if only_dicoms:
            new_file_list = []
            for f in file_list:
                if f in tagvals and len(tagvals[f][tags['num_rows']])>0:
                    # Only include DICOMs that actually have image information
                    new_file_list.append(f)
            file_list = new_file_list
        if sort_filenames:
            # Sort by the right-most number embedded in the filename;
            # unparseable names fall back to sorting by the name itself
            def file_num(fname):
                try:
                    nums = [x.strip('.') for x in re.findall(r'[\d.]+',fname) if x.strip('.')!='']
                    return float(nums[-1])
                except:
                    return fname
            file_list = sorted(file_list,key=file_num)
        if len(file_list)==0:
            nl.notify('Error: Couldn\'t find any valid DICOM images',level=nl.level.error)
            return False
        cmd = ['to3d','-skip_outliers','-quit_on_err','-prefix',prefix]
        if num_slices!=None or num_reps!=None:
            # Time-based dataset: infer whichever of slices/reps is missing
            if num_slices==None:
                if len(file_list)%num_reps!=0:
                    nl.notify('Error: trying to guess # of slices, but %d (number for files) doesn\'t divide evenly into %d (number of reps)' % (len(file_list),num_reps),level=nl.level.error)
                    return False
                # NOTE(review): '/' here is true division on Python 3 and
                # would yield a float — this code presumably targets
                # Python 2; confirm before porting.
                num_slices = len(file_list)/num_reps
            if num_reps==None:
                if len(file_list)%num_slices==0:
                    num_reps = len(file_list)/num_slices
                elif len(file_list)==1 and tags['num_reps'] in tagvals[file_list[0]]:
                    # Single multi-frame file: take # of reps from the header
                    num_reps = tagvals[file_list[0]][tags['num_reps']]
                else:
                    nl.notify('Error: trying to guess # of reps, but %d (number for files) doesn\'t divide evenly into %d (number of slices)' % (len(file_list),num_slices),level=nl.level.error)
                    return False
            if TR==None:
                TR = tagvals[file_list[0]][tags['TR']]
            # to3d's -time:zt wants slices-then-reps; -time:tz the reverse
            cmd += ['-time:%s'%file_order]
            if file_order=='zt':
                cmd += [num_slices,num_reps]
            else:
                cmd += [num_reps,num_slices]
            cmd += [TR,slice_order]
        # Read the file list from stdin ('-@'); everything must be a string
        cmd += ['-@']
        cmd = [str(x) for x in cmd]
        out = None
        try:
            p = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
            out = p.communicate('\n'.join(file_list))
            if p.returncode!=0:
                raise Exception
        except:
            with nl.notify('Error: to3d returned error',level=nl.level.error):
                if out:
                    nl.notify('stdout:\n' + out[0] + '\nstderr:\n' + out[1],level=nl.level.error)
            return False
python
def create_dset_to3d(prefix,file_list,file_order='zt',num_slices=None,num_reps=None,TR=None,slice_order='alt+z',only_dicoms=True,sort_filenames=False): '''manually create dataset by specifying everything (not recommended, but necessary when autocreation fails) If `num_slices` or `num_reps` is omitted, it will be inferred by the number of images. If both are omitted, it assumes that this it not a time-dependent dataset :only_dicoms: filter the given list by readable DICOM images :sort_filenames: sort the given files by filename using the right-most number in the filename''' tags = { 'num_rows': (0x0028,0x0010), 'num_reps': (0x0020,0x0105), 'TR': (0x0018,0x0080) } with nl.notify('Trying to create dataset %s' % prefix): if os.path.exists(prefix): nl.notify('Error: file "%s" already exists!' % prefix,level=nl.level.error) return False tagvals = {} for f in file_list: try: tagvals[f] = info_for_tags(f,tags.values()) except: pass if only_dicoms: new_file_list = [] for f in file_list: if f in tagvals and len(tagvals[f][tags['num_rows']])>0: # Only include DICOMs that actually have image information new_file_list.append(f) file_list = new_file_list if sort_filenames: def file_num(fname): try: nums = [x.strip('.') for x in re.findall(r'[\d.]+',fname) if x.strip('.')!=''] return float(nums[-1]) except: return fname file_list = sorted(file_list,key=file_num) if len(file_list)==0: nl.notify('Error: Couldn\'t find any valid DICOM images',level=nl.level.error) return False cmd = ['to3d','-skip_outliers','-quit_on_err','-prefix',prefix] if num_slices!=None or num_reps!=None: # Time-based dataset if num_slices==None: if len(file_list)%num_reps!=0: nl.notify('Error: trying to guess # of slices, but %d (number for files) doesn\'t divide evenly into %d (number of reps)' % (len(file_list),num_reps),level=nl.level.error) return False num_slices = len(file_list)/num_reps if num_reps==None: if len(file_list)%num_slices==0: num_reps = len(file_list)/num_slices elif len(file_list)==1 and 
tags['num_reps'] in tagvals[file_list[0]]: num_reps = tagvals[file_list[0]][tags['num_reps']] else: nl.notify('Error: trying to guess # of reps, but %d (number for files) doesn\'t divide evenly into %d (number of slices)' % (len(file_list),num_slices),level=nl.level.error) return False if TR==None: TR = tagvals[file_list[0]][tags['TR']] cmd += ['-time:%s'%file_order] if file_order=='zt': cmd += [num_slices,num_reps] else: cmd += [num_reps,num_slices] cmd += [TR,slice_order] cmd += ['-@'] cmd = [str(x) for x in cmd] out = None try: p = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE) out = p.communicate('\n'.join(file_list)) if p.returncode!=0: raise Exception except: with nl.notify('Error: to3d returned error',level=nl.level.error): if out: nl.notify('stdout:\n' + out[0] + '\nstderr:\n' + out[1],level=nl.level.error) return False
[ "def", "create_dset_to3d", "(", "prefix", ",", "file_list", ",", "file_order", "=", "'zt'", ",", "num_slices", "=", "None", ",", "num_reps", "=", "None", ",", "TR", "=", "None", ",", "slice_order", "=", "'alt+z'", ",", "only_dicoms", "=", "True", ",", "s...
manually create dataset by specifying everything (not recommended, but necessary when autocreation fails) If `num_slices` or `num_reps` is omitted, it will be inferred by the number of images. If both are omitted, it assumes that this it not a time-dependent dataset :only_dicoms: filter the given list by readable DICOM images :sort_filenames: sort the given files by filename using the right-most number in the filename
[ "manually", "create", "dataset", "by", "specifying", "everything", "(", "not", "recommended", "but", "necessary", "when", "autocreation", "fails", ")" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L327-L412
azraq27/neural
neural/dicom.py
create_dset
def create_dset(directory, slice_order='alt+z', sort_order='zt', force_slices=None):
    '''tries to autocreate a dataset from images in the given directory

    Thin convenience wrapper that delegates directly to the DICOM-specific
    implementation, ``_create_dset_dicom``, passing all options through.
    '''
    return _create_dset_dicom(
        directory,
        slice_order,
        sort_order,
        force_slices=force_slices,
    )
python
def create_dset(directory,slice_order='alt+z',sort_order='zt',force_slices=None): '''tries to autocreate a dataset from images in the given directory''' return _create_dset_dicom(directory,slice_order,sort_order,force_slices=force_slices)
[ "def", "create_dset", "(", "directory", ",", "slice_order", "=", "'alt+z'", ",", "sort_order", "=", "'zt'", ",", "force_slices", "=", "None", ")", ":", "return", "_create_dset_dicom", "(", "directory", ",", "slice_order", ",", "sort_order", ",", "force_slices", ...
tries to autocreate a dataset from images in the given directory
[ "tries", "to", "autocreate", "a", "dataset", "from", "images", "in", "the", "given", "directory" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L414-L416
azraq27/neural
neural/dicom.py
date_for_str
def date_for_str(date_str):
    '''tries to guess date from ambiguous date string

    Tries every ordering of year/month/day (%Y, %m, %d concatenated) and
    returns the first ``datetime`` that parses, or ``None`` if no ordering
    matches.
    '''
    for fmt_parts in itertools.permutations(['%Y','%m','%d']):
        try:
            # First ordering that parses wins
            return datetime.strptime(date_str, ''.join(fmt_parts))
        except ValueError:
            continue
    return None
python
def date_for_str(date_str): '''tries to guess date from ambiguous date string''' try: for date_format in itertools.permutations(['%Y','%m','%d']): try: date = datetime.strptime(date_str,''.join(date_format)) raise StopIteration except ValueError: pass return None except StopIteration: return date
[ "def", "date_for_str", "(", "date_str", ")", ":", "try", ":", "for", "date_format", "in", "itertools", ".", "permutations", "(", "[", "'%Y'", ",", "'%m'", ",", "'%d'", "]", ")", ":", "try", ":", "date", "=", "datetime", ".", "strptime", "(", "date_str"...
tries to guess date from ambiguous date string
[ "tries", "to", "guess", "date", "from", "ambiguous", "date", "string" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L419-L430
azraq27/neural
neural/dicom.py
organize_dir
def organize_dir(orig_dir):
    '''scans through the given directory and organizes DICOMs that look similar into subdirectories

    output directory is the ``orig_dir`` with ``-sorted`` appended to the
    end. Exact duplicate files (same MD5) are deleted first; remaining
    files are clustered by the tag set below and moved into one
    subdirectory per cluster. Empty directories left behind in the output
    tree are removed at the end.
    '''
    # Tags used to decide which files belong to the same run
    tags = [
        (0x10,0x20),    # Subj ID
        (0x8,0x21),     # Date
        (0x8,0x31),     # Time
        (0x8,0x103e)    # Descr
    ]
    orig_dir = orig_dir.rstrip('/')
    files = scan_dir(orig_dir,tags=tags,md5_hash=True)
    # Delete exact duplicates before clustering, keeping the first of each set
    dups = find_dups(files)
    for dup in dups:
        nl.notify('Found duplicates of %s...' % dup[0])
        for each_dup in dup[1:]:
            nl.notify('\tdeleting %s' % each_dup)
            try:
                os.remove(each_dup)
            except IOError:
                nl.notify('\t[failed]')
            del(files[each_dup])
    clustered = cluster_files(files)
    output_dir = '%s-sorted' % orig_dir
    for key in clustered:
        if (0x8,0x31) in clustered[key]['info']:
            # Normalize the acquisition time to an integer string for the
            # directory name (e.g. '120135.000000' -> '120135')
            clustered[key]['info'][(0x8,0x31)] = str(int(float(clustered[key]['info'][(0x8,0x31)])))
        # Fill in a placeholder for any tag missing from this cluster
        for t in tags:
            if t not in clustered[key]['info']:
                clustered[key]['info'][t] = '_'
        run_name = '-'.join([scrub_fname(str(clustered[key]['info'][x])) for x in tags])+'-%d_images' %len(clustered[key]['files'])
        run_dir = os.path.join(output_dir,run_name)
        nl.notify('Moving files into %s' % run_dir)
        try:
            if not os.path.exists(run_dir):
                os.makedirs(run_dir)
        except IOError:
            nl.notify('Error: failed to create directory %s' % run_dir)
        else:
            for f in clustered[key]['files']:
                try:
                    dset_fname = os.path.split(f)[1]
                    # Un-hide dotfiles so the moved file stays visible
                    if dset_fname[0]=='.':
                        dset_fname = '_' + dset_fname[1:]
                    os.rename(f,os.path.join(run_dir,dset_fname))
                except (IOError, OSError):
                    pass
    # Clean up any directories left empty after the moves
    for r,ds,fs in os.walk(output_dir,topdown=False):
        for d in ds:
            dname = os.path.join(r,d)
            if len(os.listdir(dname))==0:
                # Fixed: os.remove() cannot delete a directory (raises
                # OSError); os.rmdir() is the correct call for empty dirs.
                os.rmdir(dname)
python
def organize_dir(orig_dir): '''scans through the given directory and organizes DICOMs that look similar into subdirectories output directory is the ``orig_dir`` with ``-sorted`` appended to the end''' tags = [ (0x10,0x20), # Subj ID (0x8,0x21), # Date (0x8,0x31), # Time (0x8,0x103e) # Descr ] orig_dir = orig_dir.rstrip('/') files = scan_dir(orig_dir,tags=tags,md5_hash=True) dups = find_dups(files) for dup in dups: nl.notify('Found duplicates of %s...' % dup[0]) for each_dup in dup[1:]: nl.notify('\tdeleting %s' % each_dup) try: os.remove(each_dup) except IOError: nl.notify('\t[failed]') del(files[each_dup]) clustered = cluster_files(files) output_dir = '%s-sorted' % orig_dir for key in clustered: if (0x8,0x31) in clustered[key]['info']: clustered[key]['info'][(0x8,0x31)] = str(int(float(clustered[key]['info'][(0x8,0x31)]))) for t in tags: if t not in clustered[key]['info']: clustered[key]['info'][t] = '_' run_name = '-'.join([scrub_fname(str(clustered[key]['info'][x])) for x in tags])+'-%d_images' %len(clustered[key]['files']) run_dir = os.path.join(output_dir,run_name) nl.notify('Moving files into %s' % run_dir) try: if not os.path.exists(run_dir): os.makedirs(run_dir) except IOError: nl.notify('Error: failed to create directory %s' % run_dir) else: for f in clustered[key]['files']: try: dset_fname = os.path.split(f)[1] if dset_fname[0]=='.': dset_fname = '_' + dset_fname[1:] os.rename(f,os.path.join(run_dir,dset_fname)) except (IOError, OSError): pass for r,ds,fs in os.walk(output_dir,topdown=False): for d in ds: dname = os.path.join(r,d) if len(os.listdir(dname))==0: os.remove(dname)
[ "def", "organize_dir", "(", "orig_dir", ")", ":", "tags", "=", "[", "(", "0x10", ",", "0x20", ")", ",", "# Subj ID", "(", "0x8", ",", "0x21", ")", ",", "# Date", "(", "0x8", ",", "0x31", ")", ",", "# Time", "(", "0x8", ",", "0x103e", ")", "# Desc...
scans through the given directory and organizes DICOMs that look similar into subdirectories output directory is the ``orig_dir`` with ``-sorted`` appended to the end
[ "scans", "through", "the", "given", "directory", "and", "organizes", "DICOMs", "that", "look", "similar", "into", "subdirectories" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L438-L491
azraq27/neural
neural/dicom.py
classify
def classify(label_dict,image_fname=None,image_label=None):
    '''tries to classify a DICOM image based on known string patterns (with fuzzy matching)

    Takes the label from the DICOM header and compares to the entries in
    ``label_dict``. If it finds something close it will return the image
    type, otherwise it will return ``None``. Alternatively, you can supply
    your own string, ``image_label``, and it will try to match that.

    ``label_dict`` is a dictionary where the keys are dataset types and the
    values are lists of strings that match that type. For example::

        {
            'anatomy': ['SPGR','MPRAGE','anat','anatomy'],
            'dti': ['DTI'],
            'field_map': ['fieldmap','TE7','B0']
        }
    '''
    min_acceptable_match = 80
    if image_fname:
        # Pull the series-description tag (0008,103E) straight from the file
        label_info = info_for_tags(image_fname, [(0x8, 0x103e)])
        image_label = label_info[(0x8, 0x103e)]
    # Flatten {type: [keywords]} into a list of (type, keyword) pairs
    flat_pairs = []
    for dset_type in label_dict:
        for keyword in label_dict[dset_type]:
            flat_pairs.append((dset_type, keyword))
    # Fuzzy-match the label against every keyword
    best_match = process.extractOne(image_label, [pair[1] for pair in flat_pairs])
    if best_match[1] < min_acceptable_match:
        return None
    # Map the winning keyword back to its dataset type
    return [pair[0] for pair in flat_pairs if pair[1] == best_match[0]][0]
python
def classify(label_dict,image_fname=None,image_label=None): '''tries to classify a DICOM image based on known string patterns (with fuzzy matching) Takes the label from the DICOM header and compares to the entries in ``label_dict``. If it finds something close it will return the image type, otherwise it will return ``None``. Alternatively, you can supply your own string, ``image_label``, and it will try to match that. ``label_dict`` is a dictionary where the keys are dataset types and the values are lists of strings that match that type. For example:: { 'anatomy': ['SPGR','MPRAGE','anat','anatomy'], 'dti': ['DTI'], 'field_map': ['fieldmap','TE7','B0'] } ''' min_acceptable_match = 80 if image_fname: label_info = info_for_tags(image_fname,[(0x8,0x103e)]) image_label = label_info[(0x8,0x103e)] # creates a list of tuples: (type, keyword) flat_dict = [i for j in [[(b,x) for x in label_dict[b]] for b in label_dict] for i in j] best_match = process.extractOne(image_label,[x[1] for x in flat_dict]) if best_match[1]<min_acceptable_match: return None else: return [x[0] for x in flat_dict if x[1]==best_match[0]][0]
[ "def", "classify", "(", "label_dict", ",", "image_fname", "=", "None", ",", "image_label", "=", "None", ")", ":", "min_acceptable_match", "=", "80", "if", "image_fname", ":", "label_info", "=", "info_for_tags", "(", "image_fname", ",", "[", "(", "0x8", ",", ...
tries to classify a DICOM image based on known string patterns (with fuzzy matching) Takes the label from the DICOM header and compares to the entries in ``label_dict``. If it finds something close it will return the image type, otherwise it will return ``None``. Alternatively, you can supply your own string, ``image_label``, and it will try to match that. ``label_dict`` is a dictionary where the keys are dataset types and the values are lists of strings that match that type. For example:: { 'anatomy': ['SPGR','MPRAGE','anat','anatomy'], 'dti': ['DTI'], 'field_map': ['fieldmap','TE7','B0'] }
[ "tries", "to", "classify", "a", "DICOM", "image", "based", "on", "known", "string", "patterns", "(", "with", "fuzzy", "matching", ")" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L493-L519
azraq27/neural
neural/dicom.py
reconstruct_files
def reconstruct_files(input_dir): '''sorts ``input_dir`` and tries to reconstruct the subdirectories found''' input_dir = input_dir.rstrip('/') with nl.notify('Attempting to organize/reconstruct directory'): # Some datasets start with a ".", which confuses many programs for r,ds,fs in os.walk(input_dir): for f in fs: if f[0]=='.': shutil.move(os.path.join(r,f),os.path.join(r,'i'+f)) nl.dicom.organize_dir(input_dir) output_dir = '%s-sorted' % input_dir if os.path.exists(output_dir): with nl.run_in(output_dir): for dset_dir in os.listdir('.'): with nl.notify('creating dataset from %s' % dset_dir): nl.dicom.create_dset(dset_dir) else: nl.notify('Warning: failed to auto-organize directory %s' % input_dir,level=nl.level.warning)
python
def reconstruct_files(input_dir): '''sorts ``input_dir`` and tries to reconstruct the subdirectories found''' input_dir = input_dir.rstrip('/') with nl.notify('Attempting to organize/reconstruct directory'): # Some datasets start with a ".", which confuses many programs for r,ds,fs in os.walk(input_dir): for f in fs: if f[0]=='.': shutil.move(os.path.join(r,f),os.path.join(r,'i'+f)) nl.dicom.organize_dir(input_dir) output_dir = '%s-sorted' % input_dir if os.path.exists(output_dir): with nl.run_in(output_dir): for dset_dir in os.listdir('.'): with nl.notify('creating dataset from %s' % dset_dir): nl.dicom.create_dset(dset_dir) else: nl.notify('Warning: failed to auto-organize directory %s' % input_dir,level=nl.level.warning)
[ "def", "reconstruct_files", "(", "input_dir", ")", ":", "input_dir", "=", "input_dir", ".", "rstrip", "(", "'/'", ")", "with", "nl", ".", "notify", "(", "'Attempting to organize/reconstruct directory'", ")", ":", "# Some datasets start with a \".\", which confuses many pr...
sorts ``input_dir`` and tries to reconstruct the subdirectories found
[ "sorts", "input_dir", "and", "tries", "to", "reconstruct", "the", "subdirectories", "found" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L521-L538
azraq27/neural
neural/dicom.py
unpack_archive
def unpack_archive(fname,out_dir): '''unpacks the archive file ``fname`` and reconstructs datasets into ``out_dir`` Datasets are reconstructed and auto-named using :meth:`create_dset`. The raw directories that made the datasets are archive with the dataset name suffixed by ``tgz``, and any other files found in the archive are put into ``other_files.tgz``''' with nl.notify('Unpacking archive %s' % fname): tmp_dir = tempfile.mkdtemp() tmp_unpack = os.path.join(tmp_dir,'unpack') os.makedirs(tmp_unpack) nl.utils.unarchive(fname,tmp_unpack) reconstruct_files(tmp_unpack) out_dir = os.path.abspath(out_dir) if not os.path.exists(out_dir): os.makedirs(out_dir) if not os.path.exists(tmp_unpack+'-sorted'): return with nl.run_in(tmp_unpack+'-sorted'): for fname in glob.glob('*.nii'): nl.run(['gzip',fname]) for fname in glob.glob('*.nii.gz'): new_file = os.path.join(out_dir,fname) if not os.path.exists(new_file): shutil.move(fname,new_file) raw_out = os.path.join(out_dir,'raw') if not os.path.exists(raw_out): os.makedirs(raw_out) for rawdir in os.listdir('.'): rawdir_tgz = os.path.join(raw_out,rawdir+'.tgz') if not os.path.exists(rawdir_tgz): with tarfile.open(rawdir_tgz,'w:gz') as tgz: tgz.add(rawdir) if len(os.listdir(tmp_unpack))!=0: # There are still raw files left with tarfile.open(os.path.join(raw_out,'other_files.tgz'),'w:gz') as tgz: tgz.add(tmp_unpack) shutil.rmtree(tmp_dir)
python
def unpack_archive(fname,out_dir): '''unpacks the archive file ``fname`` and reconstructs datasets into ``out_dir`` Datasets are reconstructed and auto-named using :meth:`create_dset`. The raw directories that made the datasets are archive with the dataset name suffixed by ``tgz``, and any other files found in the archive are put into ``other_files.tgz``''' with nl.notify('Unpacking archive %s' % fname): tmp_dir = tempfile.mkdtemp() tmp_unpack = os.path.join(tmp_dir,'unpack') os.makedirs(tmp_unpack) nl.utils.unarchive(fname,tmp_unpack) reconstruct_files(tmp_unpack) out_dir = os.path.abspath(out_dir) if not os.path.exists(out_dir): os.makedirs(out_dir) if not os.path.exists(tmp_unpack+'-sorted'): return with nl.run_in(tmp_unpack+'-sorted'): for fname in glob.glob('*.nii'): nl.run(['gzip',fname]) for fname in glob.glob('*.nii.gz'): new_file = os.path.join(out_dir,fname) if not os.path.exists(new_file): shutil.move(fname,new_file) raw_out = os.path.join(out_dir,'raw') if not os.path.exists(raw_out): os.makedirs(raw_out) for rawdir in os.listdir('.'): rawdir_tgz = os.path.join(raw_out,rawdir+'.tgz') if not os.path.exists(rawdir_tgz): with tarfile.open(rawdir_tgz,'w:gz') as tgz: tgz.add(rawdir) if len(os.listdir(tmp_unpack))!=0: # There are still raw files left with tarfile.open(os.path.join(raw_out,'other_files.tgz'),'w:gz') as tgz: tgz.add(tmp_unpack) shutil.rmtree(tmp_dir)
[ "def", "unpack_archive", "(", "fname", ",", "out_dir", ")", ":", "with", "nl", ".", "notify", "(", "'Unpacking archive %s'", "%", "fname", ")", ":", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "tmp_unpack", "=", "os", ".", "path", ".", "join", ...
unpacks the archive file ``fname`` and reconstructs datasets into ``out_dir`` Datasets are reconstructed and auto-named using :meth:`create_dset`. The raw directories that made the datasets are archive with the dataset name suffixed by ``tgz``, and any other files found in the archive are put into ``other_files.tgz``
[ "unpacks", "the", "archive", "file", "fname", "and", "reconstructs", "datasets", "into", "out_dir" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L540-L576
azraq27/neural
neural/dicom.py
DicomInfo.addr
def addr(self,address): '''returns dictionary with frame information for given address (a tuple of two hex numbers)''' if isinstance(address,basestring): # If you just gave me a single string, assume its "XXXX XXXX" addr = address.split() else: addr = list(address) # Convert to actual hex if you give me strings for i in xrange(len(addr)): if isinstance(addr[i],basestring): addr[i] = int(addr[i],16) for frame in self.raw_frames: if frame['addr']==address: return frame
python
def addr(self,address): '''returns dictionary with frame information for given address (a tuple of two hex numbers)''' if isinstance(address,basestring): # If you just gave me a single string, assume its "XXXX XXXX" addr = address.split() else: addr = list(address) # Convert to actual hex if you give me strings for i in xrange(len(addr)): if isinstance(addr[i],basestring): addr[i] = int(addr[i],16) for frame in self.raw_frames: if frame['addr']==address: return frame
[ "def", "addr", "(", "self", ",", "address", ")", ":", "if", "isinstance", "(", "address", ",", "basestring", ")", ":", "# If you just gave me a single string, assume its \"XXXX XXXX\"", "addr", "=", "address", ".", "split", "(", ")", "else", ":", "addr", "=", ...
returns dictionary with frame information for given address (a tuple of two hex numbers)
[ "returns", "dictionary", "with", "frame", "information", "for", "given", "address", "(", "a", "tuple", "of", "two", "hex", "numbers", ")" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dicom.py#L41-L54
Laufire/ec
ec/modules/exposed.py
static
def static(cls): r"""Converts the given class into a static one, by changing all the methods of it into static methods. Args: cls (class): The class to be converted. """ for attr in dir(cls): im_func = getattr(getattr(cls, attr), 'im_func', None) if im_func: setattr(cls, attr, staticmethod(im_func)) return cls
python
def static(cls): r"""Converts the given class into a static one, by changing all the methods of it into static methods. Args: cls (class): The class to be converted. """ for attr in dir(cls): im_func = getattr(getattr(cls, attr), 'im_func', None) if im_func: setattr(cls, attr, staticmethod(im_func)) return cls
[ "def", "static", "(", "cls", ")", ":", "for", "attr", "in", "dir", "(", "cls", ")", ":", "im_func", "=", "getattr", "(", "getattr", "(", "cls", ",", "attr", ")", ",", "'im_func'", ",", "None", ")", "if", "im_func", ":", "setattr", "(", "cls", ","...
r"""Converts the given class into a static one, by changing all the methods of it into static methods. Args: cls (class): The class to be converted.
[ "r", "Converts", "the", "given", "class", "into", "a", "static", "one", "by", "changing", "all", "the", "methods", "of", "it", "into", "static", "methods", "." ]
train
https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/ec/modules/exposed.py#L8-L19
rcbops/flake8-filename
flake8_filename/rules.py
_generate_mark_code
def _generate_mark_code(rule_name): """Generates a two digit string based on a provided string Args: rule_name (str): A configured rule name 'pytest_mark3'. Returns: str: A two digit code based on the provided string '03' """ code = ''.join([i for i in str(rule_name) if i.isdigit()]) code = code.zfill(2) return code
python
def _generate_mark_code(rule_name): """Generates a two digit string based on a provided string Args: rule_name (str): A configured rule name 'pytest_mark3'. Returns: str: A two digit code based on the provided string '03' """ code = ''.join([i for i in str(rule_name) if i.isdigit()]) code = code.zfill(2) return code
[ "def", "_generate_mark_code", "(", "rule_name", ")", ":", "code", "=", "''", ".", "join", "(", "[", "i", "for", "i", "in", "str", "(", "rule_name", ")", "if", "i", ".", "isdigit", "(", ")", "]", ")", "code", "=", "code", ".", "zfill", "(", "2", ...
Generates a two digit string based on a provided string Args: rule_name (str): A configured rule name 'pytest_mark3'. Returns: str: A two digit code based on the provided string '03'
[ "Generates", "a", "two", "digit", "string", "based", "on", "a", "provided", "string" ]
train
https://github.com/rcbops/flake8-filename/blob/5718d4af394c318d376de7434193543e0da45651/flake8_filename/rules.py#L7-L18
rcbops/flake8-filename
flake8_filename/rules.py
rule_n5xx
def rule_n5xx(filename, rule_name, rule_conf, class_type): """Validate filename against a pattern if the filename passes the filter. Args: filename (str): The name of the file being parsed by flake8. rule_name (str): The name of the rule. rule_conf (dict): The dictionary containing the properties of the rule class_type (class): The class that this rule was called from Yields: tuple: (int, int, str, type) the tuple used by flake8 to construct a violation """ line_num = 0 code = _generate_mark_code(rule_name) message = "N5{} filename failed regex validation '{}'".format(code, rule_conf['filename_regex']) sanitized_filename = splitext(basename(filename))[0] # Strip path and extension if re.match(rule_conf['filter_regex'], sanitized_filename): if not re.match(rule_conf['filename_regex'], sanitized_filename): yield (line_num, 0, message, class_type)
python
def rule_n5xx(filename, rule_name, rule_conf, class_type): """Validate filename against a pattern if the filename passes the filter. Args: filename (str): The name of the file being parsed by flake8. rule_name (str): The name of the rule. rule_conf (dict): The dictionary containing the properties of the rule class_type (class): The class that this rule was called from Yields: tuple: (int, int, str, type) the tuple used by flake8 to construct a violation """ line_num = 0 code = _generate_mark_code(rule_name) message = "N5{} filename failed regex validation '{}'".format(code, rule_conf['filename_regex']) sanitized_filename = splitext(basename(filename))[0] # Strip path and extension if re.match(rule_conf['filter_regex'], sanitized_filename): if not re.match(rule_conf['filename_regex'], sanitized_filename): yield (line_num, 0, message, class_type)
[ "def", "rule_n5xx", "(", "filename", ",", "rule_name", ",", "rule_conf", ",", "class_type", ")", ":", "line_num", "=", "0", "code", "=", "_generate_mark_code", "(", "rule_name", ")", "message", "=", "\"N5{} filename failed regex validation '{}'\"", ".", "format", ...
Validate filename against a pattern if the filename passes the filter. Args: filename (str): The name of the file being parsed by flake8. rule_name (str): The name of the rule. rule_conf (dict): The dictionary containing the properties of the rule class_type (class): The class that this rule was called from Yields: tuple: (int, int, str, type) the tuple used by flake8 to construct a violation
[ "Validate", "filename", "against", "a", "pattern", "if", "the", "filename", "passes", "the", "filter", "." ]
train
https://github.com/rcbops/flake8-filename/blob/5718d4af394c318d376de7434193543e0da45651/flake8_filename/rules.py#L21-L42
CodyKochmann/time_limit
time_limit/__init__.py
timeout
def timeout(seconds=10, default_output='default_output'): """ function wrapper that limits the amount of time it has to run optional args: seconds - how long it has until the function times out default_output - what will be returned instead of an error """ def decorator(func): def _handle_timeout(signum, frame): """ throw the custom TimeoutError if called """ raise TimeoutError(strerror(ETIME)) def wrapper(*args, **kwargs): """ main wrapper for the error """ # set up the propper error signal signal(SIGALRM, _handle_timeout) # set the time the function has to run alarm(seconds) try: result = func(*args, **kwargs) except TimeoutError: if default_output == 'default_output': raise else: result = default_output finally: # cancel the timer alarm(0) return result return wraps(func)(wrapper) return decorator
python
def timeout(seconds=10, default_output='default_output'): """ function wrapper that limits the amount of time it has to run optional args: seconds - how long it has until the function times out default_output - what will be returned instead of an error """ def decorator(func): def _handle_timeout(signum, frame): """ throw the custom TimeoutError if called """ raise TimeoutError(strerror(ETIME)) def wrapper(*args, **kwargs): """ main wrapper for the error """ # set up the propper error signal signal(SIGALRM, _handle_timeout) # set the time the function has to run alarm(seconds) try: result = func(*args, **kwargs) except TimeoutError: if default_output == 'default_output': raise else: result = default_output finally: # cancel the timer alarm(0) return result return wraps(func)(wrapper) return decorator
[ "def", "timeout", "(", "seconds", "=", "10", ",", "default_output", "=", "'default_output'", ")", ":", "def", "decorator", "(", "func", ")", ":", "def", "_handle_timeout", "(", "signum", ",", "frame", ")", ":", "\"\"\" throw the custom TimeoutError if called \"\"\...
function wrapper that limits the amount of time it has to run optional args: seconds - how long it has until the function times out default_output - what will be returned instead of an error
[ "function", "wrapper", "that", "limits", "the", "amount", "of", "time", "it", "has", "to", "run", "optional", "args", ":", "seconds", "-", "how", "long", "it", "has", "until", "the", "function", "times", "out", "default_output", "-", "what", "will", "be", ...
train
https://github.com/CodyKochmann/time_limit/blob/447a640d3e187bb4775d780b757c6d9bdc88ae34/time_limit/__init__.py#L38-L68
ttm/socialLegacy
social/fb/gml2rdf.py
triplifyGML
def triplifyGML(dpath="../data/fb/",fname="foo.gdf",fnamei="foo_interaction.gdf", fpath="./fb/",scriptpath=None,uid=None,sid=None,fb_link=None,ego=True,umbrella_dir=None): """Produce a linked data publication tree from a standard GML file. INPUTS: ====== => the data directory path => the file name (fname) of the friendship network => the file name (fnamei) of the interaction network => the final path (fpath) for the tree of files to be created => a path to the script that is calling this function (scriptpath) => the numeric id (uid) of the facebook user or group of the network(s) => the numeric id (sid) of the facebook user or group of the network (s) => the facebook link (fb_link) of the user or group => the network is from a user (ego==True) or a group (ego==False) OUTPUTS: ======= the tree in the directory fpath.""" c("iniciado tripgml") if sum(c.isdigit() for c in fname)==4: year=re.findall(r".*(\d\d\d\d).gml",fname)[0][0] B.datetime_snapshot=datetime.date(*[int(i) for i in (year)]) if sum(c.isdigit() for c in fname)==12: day,month,year,hour,minute=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d)_(\d\d)(\d\d).gml",fname)[0] B.datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]) if sum(c.isdigit() for c in fname)==14: day,month,year,hour,minute,second=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d)_(\d\d)(\d\d)(\d\d).gml",fname)[0] B.datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute,second)]) elif sum(c.isdigit() for c in fname)==8: day,month,year=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d).gml",fname)[0] B.datetime_snapshot=datetime.date(*[int(i) for i in (year,month,day)]) B.datetime_snapshot_=datetime_snapshot.isoformat() B.fname=fname B.fnamei=fnamei B.name=fname.replace(".gml","_gml") if fnamei: B.namei=fnamei[:-4] B.ego=ego B.friendship=bool(fname) B.interaction=bool(fnamei) B.sid=sid B.uid=uid B.scriptpath=scriptpath B.fb_link=fb_link B.dpath=dpath B.fpath=fpath 
B.prefix="https://raw.githubusercontent.com/OpenLinkedSocialData/{}master/".format(umbrella_dir) B.umbrella_dir=umbrella_dir c("antes de ler") #fnet=S.fb.readGML(dpath+fname) # return networkx graph fnet=S.fb.readGML2(dpath+fname) # return networkx graph # return fnet c("depois de ler, antes de fazer rdf") fnet_=rdfFriendshipNetwork(fnet) # return rdflib graph if B.interaction: inet=S.fb.readGML(dpath+fnamei) # return networkx graph inet_=rdfInteractionNetwork(inet) # return rdflib graph else: inet_=0 meta=makeMetadata(fnet_,inet_) # return rdflib graph with metadata about the structure c("depois de rdf, escrita em disco") writeAllFB(fnet_,inet_,meta) # write linked data tree c("cabo")
python
def triplifyGML(dpath="../data/fb/",fname="foo.gdf",fnamei="foo_interaction.gdf", fpath="./fb/",scriptpath=None,uid=None,sid=None,fb_link=None,ego=True,umbrella_dir=None): """Produce a linked data publication tree from a standard GML file. INPUTS: ====== => the data directory path => the file name (fname) of the friendship network => the file name (fnamei) of the interaction network => the final path (fpath) for the tree of files to be created => a path to the script that is calling this function (scriptpath) => the numeric id (uid) of the facebook user or group of the network(s) => the numeric id (sid) of the facebook user or group of the network (s) => the facebook link (fb_link) of the user or group => the network is from a user (ego==True) or a group (ego==False) OUTPUTS: ======= the tree in the directory fpath.""" c("iniciado tripgml") if sum(c.isdigit() for c in fname)==4: year=re.findall(r".*(\d\d\d\d).gml",fname)[0][0] B.datetime_snapshot=datetime.date(*[int(i) for i in (year)]) if sum(c.isdigit() for c in fname)==12: day,month,year,hour,minute=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d)_(\d\d)(\d\d).gml",fname)[0] B.datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]) if sum(c.isdigit() for c in fname)==14: day,month,year,hour,minute,second=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d)_(\d\d)(\d\d)(\d\d).gml",fname)[0] B.datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute,second)]) elif sum(c.isdigit() for c in fname)==8: day,month,year=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d).gml",fname)[0] B.datetime_snapshot=datetime.date(*[int(i) for i in (year,month,day)]) B.datetime_snapshot_=datetime_snapshot.isoformat() B.fname=fname B.fnamei=fnamei B.name=fname.replace(".gml","_gml") if fnamei: B.namei=fnamei[:-4] B.ego=ego B.friendship=bool(fname) B.interaction=bool(fnamei) B.sid=sid B.uid=uid B.scriptpath=scriptpath B.fb_link=fb_link B.dpath=dpath B.fpath=fpath 
B.prefix="https://raw.githubusercontent.com/OpenLinkedSocialData/{}master/".format(umbrella_dir) B.umbrella_dir=umbrella_dir c("antes de ler") #fnet=S.fb.readGML(dpath+fname) # return networkx graph fnet=S.fb.readGML2(dpath+fname) # return networkx graph # return fnet c("depois de ler, antes de fazer rdf") fnet_=rdfFriendshipNetwork(fnet) # return rdflib graph if B.interaction: inet=S.fb.readGML(dpath+fnamei) # return networkx graph inet_=rdfInteractionNetwork(inet) # return rdflib graph else: inet_=0 meta=makeMetadata(fnet_,inet_) # return rdflib graph with metadata about the structure c("depois de rdf, escrita em disco") writeAllFB(fnet_,inet_,meta) # write linked data tree c("cabo")
[ "def", "triplifyGML", "(", "dpath", "=", "\"../data/fb/\"", ",", "fname", "=", "\"foo.gdf\"", ",", "fnamei", "=", "\"foo_interaction.gdf\"", ",", "fpath", "=", "\"./fb/\"", ",", "scriptpath", "=", "None", ",", "uid", "=", "None", ",", "sid", "=", "None", "...
Produce a linked data publication tree from a standard GML file. INPUTS: ====== => the data directory path => the file name (fname) of the friendship network => the file name (fnamei) of the interaction network => the final path (fpath) for the tree of files to be created => a path to the script that is calling this function (scriptpath) => the numeric id (uid) of the facebook user or group of the network(s) => the numeric id (sid) of the facebook user or group of the network (s) => the facebook link (fb_link) of the user or group => the network is from a user (ego==True) or a group (ego==False) OUTPUTS: ======= the tree in the directory fpath.
[ "Produce", "a", "linked", "data", "publication", "tree", "from", "a", "standard", "GML", "file", "." ]
train
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/gml2rdf.py#L4-L67
littlemo/moear-spider-zhihudaily
moear_spider_zhihudaily/entry.py
ZhihuDaily.register
def register(self, *args, **kwargs): ''' 调用方可根据主键字段进行爬虫的创建或更新操作 :return: 返回符合接口定义的字典数据 :rtype: dict ''' return { 'name': zhihu.name, 'display_name': zhihu.display_name, 'author': zhihu.author, 'email': zhihu.email, 'description': zhihu.description, 'meta': { # 爬取计划,参考 crontab 配置方法 'crawl_schedule': '0 23 * * *', # 执行爬取的随机延时,单位秒,用于避免被 Ban 'crawl_random_delay': str(60 * 60), 'package_module': 'mobi', 'language': 'zh-CN', 'book_mode': 'periodical', # 'periodical' | 'book' 'img_cover': os.path.join( _images_path, 'cv_zhihudaily.jpg'), 'img_masthead': os.path.join( _images_path, 'mh_zhihudaily.gif'), 'image_filter': json.dumps(['zhihu.com/equation']), 'css_package': os.path.join( _css_path, 'package.css') } }
python
def register(self, *args, **kwargs): ''' 调用方可根据主键字段进行爬虫的创建或更新操作 :return: 返回符合接口定义的字典数据 :rtype: dict ''' return { 'name': zhihu.name, 'display_name': zhihu.display_name, 'author': zhihu.author, 'email': zhihu.email, 'description': zhihu.description, 'meta': { # 爬取计划,参考 crontab 配置方法 'crawl_schedule': '0 23 * * *', # 执行爬取的随机延时,单位秒,用于避免被 Ban 'crawl_random_delay': str(60 * 60), 'package_module': 'mobi', 'language': 'zh-CN', 'book_mode': 'periodical', # 'periodical' | 'book' 'img_cover': os.path.join( _images_path, 'cv_zhihudaily.jpg'), 'img_masthead': os.path.join( _images_path, 'mh_zhihudaily.gif'), 'image_filter': json.dumps(['zhihu.com/equation']), 'css_package': os.path.join( _css_path, 'package.css') } }
[ "def", "register", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "{", "'name'", ":", "zhihu", ".", "name", ",", "'display_name'", ":", "zhihu", ".", "display_name", ",", "'author'", ":", "zhihu", ".", "author", ",", "'em...
调用方可根据主键字段进行爬虫的创建或更新操作 :return: 返回符合接口定义的字典数据 :rtype: dict
[ "调用方可根据主键字段进行爬虫的创建或更新操作" ]
train
https://github.com/littlemo/moear-spider-zhihudaily/blob/1e4e60b547afe3e2fbb3bbcb7d07a75dca608149/moear_spider_zhihudaily/entry.py#L34-L65
littlemo/moear-spider-zhihudaily
moear_spider_zhihudaily/entry.py
ZhihuDaily.crawl
def crawl(self, *args, **kwargs): ''' 执行爬取操作,并阻塞直到爬取完成,返回结果数据。 此处考虑到 Scrapy 本身的并发特性,故通过临时文件方式做数据传递, 将临时路径传递到爬虫业务中,并在爬取结束后对文件进行读取、 JSON 反序列化,返回 :return: 返回符合接口定义的字典对象 :rtype: dict ''' temp = tempfile.NamedTemporaryFile(mode='w+t') try: crawler = CrawlerScript() # 调试时可指定明确日期参数,如:date='20180423' crawler.crawl(output_file=temp.name, *args, **kwargs) temp.seek(0) content = json.loads(temp.read(), encoding='UTF-8') finally: temp.close() print('抓取完毕!') return content
python
def crawl(self, *args, **kwargs): ''' 执行爬取操作,并阻塞直到爬取完成,返回结果数据。 此处考虑到 Scrapy 本身的并发特性,故通过临时文件方式做数据传递, 将临时路径传递到爬虫业务中,并在爬取结束后对文件进行读取、 JSON 反序列化,返回 :return: 返回符合接口定义的字典对象 :rtype: dict ''' temp = tempfile.NamedTemporaryFile(mode='w+t') try: crawler = CrawlerScript() # 调试时可指定明确日期参数,如:date='20180423' crawler.crawl(output_file=temp.name, *args, **kwargs) temp.seek(0) content = json.loads(temp.read(), encoding='UTF-8') finally: temp.close() print('抓取完毕!') return content
[ "def", "crawl", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "temp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'w+t'", ")", "try", ":", "crawler", "=", "CrawlerScript", "(", ")", "# 调试时可指定明确日期参数,如:date='20180423'",...
执行爬取操作,并阻塞直到爬取完成,返回结果数据。 此处考虑到 Scrapy 本身的并发特性,故通过临时文件方式做数据传递, 将临时路径传递到爬虫业务中,并在爬取结束后对文件进行读取、 JSON 反序列化,返回 :return: 返回符合接口定义的字典对象 :rtype: dict
[ "执行爬取操作,并阻塞直到爬取完成,返回结果数据。", "此处考虑到", "Scrapy", "本身的并发特性,故通过临时文件方式做数据传递,", "将临时路径传递到爬虫业务中,并在爬取结束后对文件进行读取、", "JSON", "反序列化,返回" ]
train
https://github.com/littlemo/moear-spider-zhihudaily/blob/1e4e60b547afe3e2fbb3bbcb7d07a75dca608149/moear_spider_zhihudaily/entry.py#L67-L89
littlemo/moear-spider-zhihudaily
moear_spider_zhihudaily/entry.py
ZhihuDaily.format
def format(self, data, *args, **kwargs): ''' 将传入的Post列表数据进行格式化处理。此处传入的 ``data`` 格式即为 :meth:`.ZhihuDaily.crawl` 返回的格式,但具体内容可以不同,即此处保留了灵活度, 可以对非当日文章对象进行格式化,制作相关主题的合集书籍 :param data: 待处理的文章列表 :type data: list :return: 返回符合mobi打包需求的定制化数据结构 :rtype: dict ''' sections = OrderedDict() hot_list = [] normal_list = [] for item in data: meta = item.get('meta', []) # 如果标题为空,则迭代下一条目 if not item.get('title'): continue soup = BeautifulSoup(item.get('content'), "lxml") # 清洗文章内容,去除无用内容 for view_more in soup.select('.view-more'): view_more.extract() item['content'] = str(soup.div) # 处理文章摘要,若为空则根据正文自动生成并填充 if not item.get('excerpt') and item.get('content'): word_limit = self.options.get( 'toc_desc_word_limit', 500) content_list = soup.select('div.content') content_list = [content.get_text() for content in content_list] excerpt = ' '.join(content_list)[:word_limit] # 此处摘要信息需进行HTML转义,否则会造成toc.ncx中tag处理错误 item['excerpt'] = html.escape(excerpt) # 从item中提取出section分组 top = meta.pop('spider.zhihu_daily.top', '0') item['meta'] = meta if str(top) == '1': hot_list.append(item) else: normal_list.append(item) if hot_list: sections.setdefault('热闻', hot_list) if normal_list: sections.setdefault('日报', normal_list) return sections
python
def format(self, data, *args, **kwargs): ''' 将传入的Post列表数据进行格式化处理。此处传入的 ``data`` 格式即为 :meth:`.ZhihuDaily.crawl` 返回的格式,但具体内容可以不同,即此处保留了灵活度, 可以对非当日文章对象进行格式化,制作相关主题的合集书籍 :param data: 待处理的文章列表 :type data: list :return: 返回符合mobi打包需求的定制化数据结构 :rtype: dict ''' sections = OrderedDict() hot_list = [] normal_list = [] for item in data: meta = item.get('meta', []) # 如果标题为空,则迭代下一条目 if not item.get('title'): continue soup = BeautifulSoup(item.get('content'), "lxml") # 清洗文章内容,去除无用内容 for view_more in soup.select('.view-more'): view_more.extract() item['content'] = str(soup.div) # 处理文章摘要,若为空则根据正文自动生成并填充 if not item.get('excerpt') and item.get('content'): word_limit = self.options.get( 'toc_desc_word_limit', 500) content_list = soup.select('div.content') content_list = [content.get_text() for content in content_list] excerpt = ' '.join(content_list)[:word_limit] # 此处摘要信息需进行HTML转义,否则会造成toc.ncx中tag处理错误 item['excerpt'] = html.escape(excerpt) # 从item中提取出section分组 top = meta.pop('spider.zhihu_daily.top', '0') item['meta'] = meta if str(top) == '1': hot_list.append(item) else: normal_list.append(item) if hot_list: sections.setdefault('热闻', hot_list) if normal_list: sections.setdefault('日报', normal_list) return sections
[ "def", "format", "(", "self", ",", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "sections", "=", "OrderedDict", "(", ")", "hot_list", "=", "[", "]", "normal_list", "=", "[", "]", "for", "item", "in", "data", ":", "meta", "=", "ite...
将传入的Post列表数据进行格式化处理。此处传入的 ``data`` 格式即为 :meth:`.ZhihuDaily.crawl` 返回的格式,但具体内容可以不同,即此处保留了灵活度, 可以对非当日文章对象进行格式化,制作相关主题的合集书籍 :param data: 待处理的文章列表 :type data: list :return: 返回符合mobi打包需求的定制化数据结构 :rtype: dict
[ "将传入的Post列表数据进行格式化处理。此处传入的", "data", "格式即为", ":", "meth", ":", ".", "ZhihuDaily", ".", "crawl", "返回的格式,但具体内容可以不同,即此处保留了灵活度,", "可以对非当日文章对象进行格式化,制作相关主题的合集书籍" ]
train
https://github.com/littlemo/moear-spider-zhihudaily/blob/1e4e60b547afe3e2fbb3bbcb7d07a75dca608149/moear_spider_zhihudaily/entry.py#L91-L142
MacHu-GWU/pymongo_mate-project
pymongo_mate/crud/update.py
upsert_many
def upsert_many(col, data): """ Only used when having "_id" field. **中文文档** 要求 ``data`` 中的每一个 ``document`` 都必须有 ``_id`` 项。这样才能进行 ``upsert`` 操作。 """ ready_to_insert = list() for doc in data: res = col.update({"_id": doc["_id"]}, {"$set": doc}, upsert=False) # 没有任何数据被修改, 且不是因为数据存在但值相同 if res["nModified"] == 0 and res["updatedExisting"] is False: ready_to_insert.append(doc) col.insert(ready_to_insert)
python
def upsert_many(col, data): """ Only used when having "_id" field. **中文文档** 要求 ``data`` 中的每一个 ``document`` 都必须有 ``_id`` 项。这样才能进行 ``upsert`` 操作。 """ ready_to_insert = list() for doc in data: res = col.update({"_id": doc["_id"]}, {"$set": doc}, upsert=False) # 没有任何数据被修改, 且不是因为数据存在但值相同 if res["nModified"] == 0 and res["updatedExisting"] is False: ready_to_insert.append(doc) col.insert(ready_to_insert)
[ "def", "upsert_many", "(", "col", ",", "data", ")", ":", "ready_to_insert", "=", "list", "(", ")", "for", "doc", "in", "data", ":", "res", "=", "col", ".", "update", "(", "{", "\"_id\"", ":", "doc", "[", "\"_id\"", "]", "}", ",", "{", "\"$set\"", ...
Only used when having "_id" field. **中文文档** 要求 ``data`` 中的每一个 ``document`` 都必须有 ``_id`` 项。这样才能进行 ``upsert`` 操作。
[ "Only", "used", "when", "having", "_id", "field", "." ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/crud/update.py#L13-L28
ttm/socialLegacy
social/fb/fb.py
triplifyGML
def triplifyGML(fname="foo.gml",fpath="./fb/",scriptpath=None,uid=None,sid=None,extra_info=None): """Produce a linked data publication tree from a standard GML file. INPUTS: => the file name (fname, with path) where the gdf file of the friendship network is. => the final path (fpath) for the tree of files to be created. => a path to the script that is calling this function (scriptpath). => the numeric id (uid) of the facebook user of which fname holds a friendship network => the numeric id (sid) of the facebook user of which fname holds a friendship network OUTPUTS: the tree in the directory fpath.""" # aname=fname.split("/")[-1].split(".")[0] aname=fname.split("/")[-1].split(".")[0] if "RonaldCosta" in fname: aname=fname.split("/")[-1].split(".")[0] name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).gml",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0] name_="Ronald Scherolt Costa" elif "AntonioAnzoategui" in fname: aname=re.findall(".*/([a-zA-Z]*\d*)",fname)[0] name,year,month,day,hour,minute=re.findall(r".*/([a-zA-Z]*).*_(\d+)_(\d*)_(\d*)_(\d*)_(\d*)_.*",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat()[:-3] name_="Antônio Anzoategui Fabbri" elif re.findall(".*/[a-zA-Z]*(\d)",fname): name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gml",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) elif re.findall("[a-zA-Z]*_",fname): name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gml",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) else: name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) aname+="_fb" name+="_fb" c("started snapshot",aname) 
tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"the {} facebook ego friendship network") tg2=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"RDF metadata for the facebook friendship network of my son") snapshot=P.rdf.IC([tg2],P.rdf.ns.po.FacebookSnapshot, aname,"Snapshot {}".format(aname)) extra_uri=extra_val=[] if extra_info: extra_uri=[NS.po.extraInfo] extra_val=[extra_info] P.rdf.link([tg2],snapshot,"Snapshot {}".format(aname), [P.rdf.ns.po.createdAt, P.rdf.ns.po.triplifiedIn, P.rdf.ns.po.donatedBy, P.rdf.ns.po.availableAt, P.rdf.ns.po.originalFile, P.rdf.ns.po.onlineTranslateXMLFile, P.rdf.ns.po.onlineTranslateTTLFile, P.rdf.ns.po.translateXMLFile, P.rdf.ns.po.translateTTLFile, P.rdf.ns.po.onlineMetaXMLFile, P.rdf.ns.po.onlineMetaTTLFile, P.rdf.ns.po.metaXMLFilename, P.rdf.ns.po.metaTTLFilename, P.rdf.ns.po.acquiredThrough, P.rdf.ns.rdfs.comment, P.rdf.ns.fb.uid, P.rdf.ns.fb.sid ]+extra_uri, [datetime_snapshot, datetime.datetime.now(), name, "https://github.com/ttm/{}".format(aname), "https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")[-1]), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.rdf".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname), "{}Translate.rdf".format(aname), "{}Translate.ttl".format(aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.rdf".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname), "{}Meta.owl".format(aname), "{}Meta.ttl".format(aname), "Netvizz", "The facebook friendship network from {}".format(name_), uid, sid ]+extra_val) #for friend_attr in fg2["friends"]: c((aname,name_,datetime_snapshot)) fg2=x.read_gml(fname) c("read gml") for uid in fg2: c(uid) ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,"{}-{}".format(aname,uid)) if "locale" in fg2.node[uid].keys(): data=[fg2.node[uid][attr] for attr in 
("id","label","locale","sex","agerank","wallcount")] uris=[NS.fb.gid, NS.fb.name, NS.fb.locale, NS.fb.sex, NS.fb.agerank,NS.fb.wallcount] else: data=[fg2.node[uid][attr] for attr in ("id","label","sex","agerank","wallcount")] uris=[NS.fb.gid, NS.fb.name, NS.fb.sex, NS.fb.agerank,NS.fb.wallcount] P.rdf.link([tg],ind, None,uris,data,draw=False) P.rdf.link_([tg],ind,None,[NS.po.snapshot],[snapshot],draw=False) #friends_=[fg2["friends"][i] for i in ("name","label","locale","sex","agerank")] #for name,label,locale,sex,agerank in zip(*friends_): # ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label) # P.rdf.link([tg],ind,label,[P.rdf.ns.fb.uid,P.rdf.ns.fb.name, # P.rdf.ns.fb.locale,P.rdf.ns.fb.sex, # P.rdf.ns.fb.agerank], # [name,label,locale,sex,agerank]) c("escritos participantes") #friendships_=[fg2["friendships"][i] for i in ("node1","node2")] i=1 for uid1,uid2 in fg2.edges(): flabel="{}-{}-{}".format(aname,uid1,uid2) ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,flabel) uids=[P.rdf.IC(None,P.rdf.ns.fb.Participant,"{}-{}".format(aname,i)) for i in (uid1,uid2)] P.rdf.link_([tg],ind,flabel,[NS.po.snapshot]+[NS.fb.member]*2, [snapshot]+uids,draw=False) P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1]) if (i%1000)==0: c(i) i+=1 c("escritas amizades") tg_=[tg[0]+tg2[0],tg[1]] fpath_="{}/{}/".format(fpath,aname) P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1) # copia o script que gera este codigo if not os.path.isdir(fpath_+"scripts"): os.mkdir(fpath_+"scripts") #shutil.copy(this_dir+"/../tests/rdfMyFNetwork2.py",fpath+"scripts/") shutil.copy(scriptpath,fpath_+"scripts/") # copia do base data if not os.path.isdir(fpath_+"base"): os.mkdir(fpath_+"base") shutil.copy(fname,fpath_+"base/") P.rdf.writeAll(tg2,aname+"Meta",fpath_,False) # faz um README with open(fpath_+"README","w") as f: f.write("""This repo delivers RDF data from the facebook friendship network of {} ({}) collected at {}. It has {} friends with metadata {}; and {} friendships. 
The linked data is available at rdf/ dir and was generated by the routine in the script/ directory. Original data from Netvizz in data/\n""".format( name_,aname,datetime_snapshot, fg2.number_of_nodes(), "name, locale (maybe), sex, agerank and wallcount", fg2.number_of_edges()))
python
def triplifyGML(fname="foo.gml",fpath="./fb/",scriptpath=None,uid=None,sid=None,extra_info=None): """Produce a linked data publication tree from a standard GML file. INPUTS: => the file name (fname, with path) where the gdf file of the friendship network is. => the final path (fpath) for the tree of files to be created. => a path to the script that is calling this function (scriptpath). => the numeric id (uid) of the facebook user of which fname holds a friendship network => the numeric id (sid) of the facebook user of which fname holds a friendship network OUTPUTS: the tree in the directory fpath.""" # aname=fname.split("/")[-1].split(".")[0] aname=fname.split("/")[-1].split(".")[0] if "RonaldCosta" in fname: aname=fname.split("/")[-1].split(".")[0] name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).gml",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0] name_="Ronald Scherolt Costa" elif "AntonioAnzoategui" in fname: aname=re.findall(".*/([a-zA-Z]*\d*)",fname)[0] name,year,month,day,hour,minute=re.findall(r".*/([a-zA-Z]*).*_(\d+)_(\d*)_(\d*)_(\d*)_(\d*)_.*",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat()[:-3] name_="Antônio Anzoategui Fabbri" elif re.findall(".*/[a-zA-Z]*(\d)",fname): name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gml",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) elif re.findall("[a-zA-Z]*_",fname): name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gml",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) else: name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) aname+="_fb" name+="_fb" c("started snapshot",aname) 
tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"the {} facebook ego friendship network") tg2=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"RDF metadata for the facebook friendship network of my son") snapshot=P.rdf.IC([tg2],P.rdf.ns.po.FacebookSnapshot, aname,"Snapshot {}".format(aname)) extra_uri=extra_val=[] if extra_info: extra_uri=[NS.po.extraInfo] extra_val=[extra_info] P.rdf.link([tg2],snapshot,"Snapshot {}".format(aname), [P.rdf.ns.po.createdAt, P.rdf.ns.po.triplifiedIn, P.rdf.ns.po.donatedBy, P.rdf.ns.po.availableAt, P.rdf.ns.po.originalFile, P.rdf.ns.po.onlineTranslateXMLFile, P.rdf.ns.po.onlineTranslateTTLFile, P.rdf.ns.po.translateXMLFile, P.rdf.ns.po.translateTTLFile, P.rdf.ns.po.onlineMetaXMLFile, P.rdf.ns.po.onlineMetaTTLFile, P.rdf.ns.po.metaXMLFilename, P.rdf.ns.po.metaTTLFilename, P.rdf.ns.po.acquiredThrough, P.rdf.ns.rdfs.comment, P.rdf.ns.fb.uid, P.rdf.ns.fb.sid ]+extra_uri, [datetime_snapshot, datetime.datetime.now(), name, "https://github.com/ttm/{}".format(aname), "https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")[-1]), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.rdf".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname), "{}Translate.rdf".format(aname), "{}Translate.ttl".format(aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.rdf".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname), "{}Meta.owl".format(aname), "{}Meta.ttl".format(aname), "Netvizz", "The facebook friendship network from {}".format(name_), uid, sid ]+extra_val) #for friend_attr in fg2["friends"]: c((aname,name_,datetime_snapshot)) fg2=x.read_gml(fname) c("read gml") for uid in fg2: c(uid) ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,"{}-{}".format(aname,uid)) if "locale" in fg2.node[uid].keys(): data=[fg2.node[uid][attr] for attr in 
("id","label","locale","sex","agerank","wallcount")] uris=[NS.fb.gid, NS.fb.name, NS.fb.locale, NS.fb.sex, NS.fb.agerank,NS.fb.wallcount] else: data=[fg2.node[uid][attr] for attr in ("id","label","sex","agerank","wallcount")] uris=[NS.fb.gid, NS.fb.name, NS.fb.sex, NS.fb.agerank,NS.fb.wallcount] P.rdf.link([tg],ind, None,uris,data,draw=False) P.rdf.link_([tg],ind,None,[NS.po.snapshot],[snapshot],draw=False) #friends_=[fg2["friends"][i] for i in ("name","label","locale","sex","agerank")] #for name,label,locale,sex,agerank in zip(*friends_): # ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label) # P.rdf.link([tg],ind,label,[P.rdf.ns.fb.uid,P.rdf.ns.fb.name, # P.rdf.ns.fb.locale,P.rdf.ns.fb.sex, # P.rdf.ns.fb.agerank], # [name,label,locale,sex,agerank]) c("escritos participantes") #friendships_=[fg2["friendships"][i] for i in ("node1","node2")] i=1 for uid1,uid2 in fg2.edges(): flabel="{}-{}-{}".format(aname,uid1,uid2) ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,flabel) uids=[P.rdf.IC(None,P.rdf.ns.fb.Participant,"{}-{}".format(aname,i)) for i in (uid1,uid2)] P.rdf.link_([tg],ind,flabel,[NS.po.snapshot]+[NS.fb.member]*2, [snapshot]+uids,draw=False) P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1]) if (i%1000)==0: c(i) i+=1 c("escritas amizades") tg_=[tg[0]+tg2[0],tg[1]] fpath_="{}/{}/".format(fpath,aname) P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1) # copia o script que gera este codigo if not os.path.isdir(fpath_+"scripts"): os.mkdir(fpath_+"scripts") #shutil.copy(this_dir+"/../tests/rdfMyFNetwork2.py",fpath+"scripts/") shutil.copy(scriptpath,fpath_+"scripts/") # copia do base data if not os.path.isdir(fpath_+"base"): os.mkdir(fpath_+"base") shutil.copy(fname,fpath_+"base/") P.rdf.writeAll(tg2,aname+"Meta",fpath_,False) # faz um README with open(fpath_+"README","w") as f: f.write("""This repo delivers RDF data from the facebook friendship network of {} ({}) collected at {}. It has {} friends with metadata {}; and {} friendships. 
The linked data is available at rdf/ dir and was generated by the routine in the script/ directory. Original data from Netvizz in data/\n""".format( name_,aname,datetime_snapshot, fg2.number_of_nodes(), "name, locale (maybe), sex, agerank and wallcount", fg2.number_of_edges()))
[ "def", "triplifyGML", "(", "fname", "=", "\"foo.gml\"", ",", "fpath", "=", "\"./fb/\"", ",", "scriptpath", "=", "None", ",", "uid", "=", "None", ",", "sid", "=", "None", ",", "extra_info", "=", "None", ")", ":", "# aname=fname.split(\"/\")[-1].split(\".\")[...
Produce a linked data publication tree from a standard GML file. INPUTS: => the file name (fname, with path) where the gdf file of the friendship network is. => the final path (fpath) for the tree of files to be created. => a path to the script that is calling this function (scriptpath). => the numeric id (uid) of the facebook user of which fname holds a friendship network => the numeric id (sid) of the facebook user of which fname holds a friendship network OUTPUTS: the tree in the directory fpath.
[ "Produce", "a", "linked", "data", "publication", "tree", "from", "a", "standard", "GML", "file", "." ]
train
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/fb.py#L11-L167
ttm/socialLegacy
social/fb/fb.py
triplifyGDFInteraction
def triplifyGDFInteraction(fname="foo.gdf",fpath="./fb/",scriptpath=None,uid=None,sid=None,dlink=None): """Produce a linked data publication tree from GDF files of a Facebook interaction network. INPUTS: => the file name (fname, with path) where the gdf file of the friendship network is. => the final path (fpath) for the tree of files to be created. => a path to the script that is calling this function (scriptpath). => the numeric id (uid) of the facebook group => the string id (sid) of the facebook group of which fname holds a friendship network OUTPUTS: the tree in the directory fpath.""" #aname=fname.split("/")[-1].split(".")[0]+"_fb" aname=fname.split("/")[-1].split(".")[0] if re.findall("[a-zA-Z]*_[0-9]",fname): name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gdf",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) elif re.findall("(\d)",fname): name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gdf",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) else: datetime_snapshot=datetime.datetime(2013,3,15).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",aname)) aname+="_fb" name=aname tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"The facebook interaction network from the {} file".format(fname)) # drop de agraph tg2=P.rdf.makeBasicGraph([["po"],[P.rdf.ns.per]],"Metadata for my facebook ego friendship network RDF files") # drop de agraph ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot, aname,"Snapshot {}".format(aname)) foo={"uris":[],"vals":[]} if sid: foo["uris"].append(P.rdf.ns.fb.sid) foo["vals"].append(sid) if uid: foo["uris"].append(P.rdf.ns.fb.uid) foo["vals"].append(uid) if dlink: foo["uris"].append(P.rdf.ns.fb.link) 
foo["vals"].append(dlink) P.rdf.link([tg2],ind,"Snapshot {}".format(aname), [P.rdf.ns.po.createdAt, P.rdf.ns.po.triplifiedIn, P.rdf.ns.po.donatedBy, P.rdf.ns.po.availableAt, P.rdf.ns.po.originalFile, P.rdf.ns.po.rdfFile, P.rdf.ns.po.ttlFile, P.rdf.ns.po.discorveryRDFFile, P.rdf.ns.po.discoveryTTLFile, P.rdf.ns.po.acquiredThrough, P.rdf.ns.rdfs.comment, ]+foo["uris"], [datetime_snapshot, datetime.datetime.now(), name, "https://github.com/ttm/{}".format(aname), "https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.owl".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.owl".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname), "Netvizz", "The facebook friendship network from {}".format(name_), ]+foo["vals"]) #for friend_attr in fg2["friends"]: fg2=readGDF(fname) tkeys=list(fg2["friends"].keys()) def trans(tkey): if tkey=="name": return "uid" if tkey=="label": return "name" return tkey foo={"uris":[],"vals":[]} for tkey in tkeys: if tkey=="groupid": P.rdf.link([tg2],ind,"Snapshot {}".format(aname), [P.rdf.ns.po.uid,], [fg2["friends"][tkey][0]]) if tkey: foo["uris"]+=[eval("P.rdf.ns.fb."+trans(tkey))] foo["vals"]+=[fg2["friends"][tkey]] print(tkeys) iname=tkeys.index("name") ilabel=tkeys.index("label") icount=0 name_label={} for vals_ in zip(*foo["vals"]): name,label=[foo["vals"][i][icount] for i in (iname,ilabel)] if not label: label="po:noname" vals_=list(vals_) vals_[ilabel]=label name_label[name]=label ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label) P.rdf.link([tg],ind,label,foo["uris"], vals_,draw=False) icount+=1 friendships_=[fg2["friendships"][i] for i in ("node1","node2")] c("escritos participantes") i=1 for uid1,uid2 in zip(*friendships_): flabel="{}-{}".format(uid1,uid2) 
labels=[name_label[uu] for uu in (uid1,uid2)] ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship, flabel) #flabel,"Friendship "+flabel) ind1=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid1) ind2=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid2) uids=[r.URIRef(P.rdf.ns.fb.Participant+"#"+str(i)) for i in (uid1,uid2)] P.rdf.link_([tg],ind,"Friendship "+flabel,[P.rdf.ns.fb.member]*2, uids,labels,draw=False) P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1]) if (i%1000)==0: c(i) i+=1 P.rdf.G(tg[0],P.rdf.ns.fb.friend, P.rdf.ns.rdf.type, P.rdf.ns.owl.SymmetricProperty) c("escritas amizades") tg_=[tg[0]+tg2[0],tg[1]] fpath_="{}{}/".format(fpath,aname) P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1) # copia o script que gera este codigo if not os.path.isdir(fpath_+"scripts"): os.mkdir(fpath_+"scripts") shutil.copy(scriptpath,fpath_+"scripts/") # copia do base data if not os.path.isdir(fpath_+"base"): os.mkdir(fpath_+"base") shutil.copy(fname,fpath_+"base/") P.rdf.writeAll(tg2,aname+"Meta",fpath_,1) # faz um README with open(fpath_+"README","w") as f: f.write("""This repo delivers RDF data from the facebook friendship network of {} collected at {}. It has {} friends with metadata {}; and {} friendships. The linked data is available at rdf/ dir and was generated by the routine in the script/ directory. Original data from Netvizz in data/\n""".format( name_,datetime_snapshot, len(fg2["friends"]["name"]), "facebook numeric id, name, locale, sex and agerank", len(fg2["friendships"]["node1"]) ))
python
def triplifyGDFInteraction(fname="foo.gdf",fpath="./fb/",scriptpath=None,uid=None,sid=None,dlink=None): """Produce a linked data publication tree from GDF files of a Facebook interaction network. INPUTS: => the file name (fname, with path) where the gdf file of the friendship network is. => the final path (fpath) for the tree of files to be created. => a path to the script that is calling this function (scriptpath). => the numeric id (uid) of the facebook group => the string id (sid) of the facebook group of which fname holds a friendship network OUTPUTS: the tree in the directory fpath.""" #aname=fname.split("/")[-1].split(".")[0]+"_fb" aname=fname.split("/")[-1].split(".")[0] if re.findall("[a-zA-Z]*_[0-9]",fname): name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gdf",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) elif re.findall("(\d)",fname): name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gdf",fname)[0] datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",name)) else: datetime_snapshot=datetime.datetime(2013,3,15).isoformat().split("T")[0] name_=" ".join(re.findall("[A-Z][^A-Z]*",aname)) aname+="_fb" name=aname tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"The facebook interaction network from the {} file".format(fname)) # drop de agraph tg2=P.rdf.makeBasicGraph([["po"],[P.rdf.ns.per]],"Metadata for my facebook ego friendship network RDF files") # drop de agraph ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot, aname,"Snapshot {}".format(aname)) foo={"uris":[],"vals":[]} if sid: foo["uris"].append(P.rdf.ns.fb.sid) foo["vals"].append(sid) if uid: foo["uris"].append(P.rdf.ns.fb.uid) foo["vals"].append(uid) if dlink: foo["uris"].append(P.rdf.ns.fb.link) 
foo["vals"].append(dlink) P.rdf.link([tg2],ind,"Snapshot {}".format(aname), [P.rdf.ns.po.createdAt, P.rdf.ns.po.triplifiedIn, P.rdf.ns.po.donatedBy, P.rdf.ns.po.availableAt, P.rdf.ns.po.originalFile, P.rdf.ns.po.rdfFile, P.rdf.ns.po.ttlFile, P.rdf.ns.po.discorveryRDFFile, P.rdf.ns.po.discoveryTTLFile, P.rdf.ns.po.acquiredThrough, P.rdf.ns.rdfs.comment, ]+foo["uris"], [datetime_snapshot, datetime.datetime.now(), name, "https://github.com/ttm/{}".format(aname), "https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.owl".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.owl".format(aname,aname), "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname), "Netvizz", "The facebook friendship network from {}".format(name_), ]+foo["vals"]) #for friend_attr in fg2["friends"]: fg2=readGDF(fname) tkeys=list(fg2["friends"].keys()) def trans(tkey): if tkey=="name": return "uid" if tkey=="label": return "name" return tkey foo={"uris":[],"vals":[]} for tkey in tkeys: if tkey=="groupid": P.rdf.link([tg2],ind,"Snapshot {}".format(aname), [P.rdf.ns.po.uid,], [fg2["friends"][tkey][0]]) if tkey: foo["uris"]+=[eval("P.rdf.ns.fb."+trans(tkey))] foo["vals"]+=[fg2["friends"][tkey]] print(tkeys) iname=tkeys.index("name") ilabel=tkeys.index("label") icount=0 name_label={} for vals_ in zip(*foo["vals"]): name,label=[foo["vals"][i][icount] for i in (iname,ilabel)] if not label: label="po:noname" vals_=list(vals_) vals_[ilabel]=label name_label[name]=label ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label) P.rdf.link([tg],ind,label,foo["uris"], vals_,draw=False) icount+=1 friendships_=[fg2["friendships"][i] for i in ("node1","node2")] c("escritos participantes") i=1 for uid1,uid2 in zip(*friendships_): flabel="{}-{}".format(uid1,uid2) 
labels=[name_label[uu] for uu in (uid1,uid2)] ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship, flabel) #flabel,"Friendship "+flabel) ind1=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid1) ind2=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid2) uids=[r.URIRef(P.rdf.ns.fb.Participant+"#"+str(i)) for i in (uid1,uid2)] P.rdf.link_([tg],ind,"Friendship "+flabel,[P.rdf.ns.fb.member]*2, uids,labels,draw=False) P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1]) if (i%1000)==0: c(i) i+=1 P.rdf.G(tg[0],P.rdf.ns.fb.friend, P.rdf.ns.rdf.type, P.rdf.ns.owl.SymmetricProperty) c("escritas amizades") tg_=[tg[0]+tg2[0],tg[1]] fpath_="{}{}/".format(fpath,aname) P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1) # copia o script que gera este codigo if not os.path.isdir(fpath_+"scripts"): os.mkdir(fpath_+"scripts") shutil.copy(scriptpath,fpath_+"scripts/") # copia do base data if not os.path.isdir(fpath_+"base"): os.mkdir(fpath_+"base") shutil.copy(fname,fpath_+"base/") P.rdf.writeAll(tg2,aname+"Meta",fpath_,1) # faz um README with open(fpath_+"README","w") as f: f.write("""This repo delivers RDF data from the facebook friendship network of {} collected at {}. It has {} friends with metadata {}; and {} friendships. The linked data is available at rdf/ dir and was generated by the routine in the script/ directory. Original data from Netvizz in data/\n""".format( name_,datetime_snapshot, len(fg2["friends"]["name"]), "facebook numeric id, name, locale, sex and agerank", len(fg2["friendships"]["node1"]) ))
[ "def", "triplifyGDFInteraction", "(", "fname", "=", "\"foo.gdf\"", ",", "fpath", "=", "\"./fb/\"", ",", "scriptpath", "=", "None", ",", "uid", "=", "None", ",", "sid", "=", "None", ",", "dlink", "=", "None", ")", ":", "#aname=fname.split(\"/\")[-1].split(\".\"...
Produce a linked data publication tree from GDF files of a Facebook interaction network. INPUTS: => the file name (fname, with path) where the gdf file of the friendship network is. => the final path (fpath) for the tree of files to be created. => a path to the script that is calling this function (scriptpath). => the numeric id (uid) of the facebook group => the string id (sid) of the facebook group of which fname holds a friendship network OUTPUTS: the tree in the directory fpath.
[ "Produce", "a", "linked", "data", "publication", "tree", "from", "GDF", "files", "of", "a", "Facebook", "interaction", "network", "." ]
train
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/fb.py#L170-L326
ttm/socialLegacy
social/fb/fb.py
ScrapyBrowser.getFriends
def getFriends(self,user_id="astronauta.mecanico",write=True): """Returns user_ids (that you have access) of the friends of your friend with user_ids""" while user_id not in self.browser.url: self.browser.visit("http://www.facebook.com/{}/friends".format(user_id), wait_time=3) #self.go("http://www.facebook.com/{}/friends".format(user_id)) T=time.time() while 1: h1=self.browser.evaluate_script("document.body.scrollHeight") self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight);") h2=self.browser.evaluate_script("document.body.scrollHeight") if h1 != h2: T=time.time() elif time.time()-T>10: break #links=self.browser.find_link_by_partial_href("hc_location=friends_tab") links=self.browser.find_by_css(".fcb") friends=[] for link in links: name=link.value user_id_=link.find_by_tag("a")["href"].split("/")[-1].split("?")[0] friends.append((user_id_,name)) tdict={} tdict["name"]=self.browser.find_by_id("fb-timeline-cover-name").value tdict["user_id"]=user_id tdict["friends"]=friends infos=self.browser.find_by_css("._3c_") mutual=0 for info in infos: if info.value=="Mutual Friends": if info.find_by_css("._3d0").value: tdict["n_mutual"]=info.find_by_css("._3d0").value mutual=1 if info.value=="All Friends": tdict["n_friends"]=info.find_by_css("._3d0").value if mutual==0: links=self.browser.find_by_css("._gs6") if "Mutual" in links.value: tdict["n_mutual"]=links.value.split(" ")[0] if write: if not os.path.isdir("{}/fb_ids/".format(self._BASE_DIR)): os.mkdir("{}/fb_ids/".format(self._BASE_DIR)) with open("{}fb_ids/{}.pickle".format(self._BASE_DIR,user_id),"wb") as f: pickle.dump(tdict,f) self.tdict=tdict return tdict
python
def getFriends(self,user_id="astronauta.mecanico",write=True): """Returns user_ids (that you have access) of the friends of your friend with user_ids""" while user_id not in self.browser.url: self.browser.visit("http://www.facebook.com/{}/friends".format(user_id), wait_time=3) #self.go("http://www.facebook.com/{}/friends".format(user_id)) T=time.time() while 1: h1=self.browser.evaluate_script("document.body.scrollHeight") self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight);") h2=self.browser.evaluate_script("document.body.scrollHeight") if h1 != h2: T=time.time() elif time.time()-T>10: break #links=self.browser.find_link_by_partial_href("hc_location=friends_tab") links=self.browser.find_by_css(".fcb") friends=[] for link in links: name=link.value user_id_=link.find_by_tag("a")["href"].split("/")[-1].split("?")[0] friends.append((user_id_,name)) tdict={} tdict["name"]=self.browser.find_by_id("fb-timeline-cover-name").value tdict["user_id"]=user_id tdict["friends"]=friends infos=self.browser.find_by_css("._3c_") mutual=0 for info in infos: if info.value=="Mutual Friends": if info.find_by_css("._3d0").value: tdict["n_mutual"]=info.find_by_css("._3d0").value mutual=1 if info.value=="All Friends": tdict["n_friends"]=info.find_by_css("._3d0").value if mutual==0: links=self.browser.find_by_css("._gs6") if "Mutual" in links.value: tdict["n_mutual"]=links.value.split(" ")[0] if write: if not os.path.isdir("{}/fb_ids/".format(self._BASE_DIR)): os.mkdir("{}/fb_ids/".format(self._BASE_DIR)) with open("{}fb_ids/{}.pickle".format(self._BASE_DIR,user_id),"wb") as f: pickle.dump(tdict,f) self.tdict=tdict return tdict
[ "def", "getFriends", "(", "self", ",", "user_id", "=", "\"astronauta.mecanico\"", ",", "write", "=", "True", ")", ":", "while", "user_id", "not", "in", "self", ".", "browser", ".", "url", ":", "self", ".", "browser", ".", "visit", "(", "\"http://www.facebo...
Returns user_ids (that you have access) of the friends of your friend with user_ids
[ "Returns", "user_ids", "(", "that", "you", "have", "access", ")", "of", "the", "friends", "of", "your", "friend", "with", "user_ids" ]
train
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/fb.py#L611-L655
benley/butcher
butcher/main.py
resolve
def resolve(args): """Just print the result of parsing a target string.""" if not args: log.error('Exactly 1 argument is required.') app.quit(1) print(address.new(args[0]))
python
def resolve(args): """Just print the result of parsing a target string.""" if not args: log.error('Exactly 1 argument is required.') app.quit(1) print(address.new(args[0]))
[ "def", "resolve", "(", "args", ")", ":", "if", "not", "args", ":", "log", ".", "error", "(", "'Exactly 1 argument is required.'", ")", "app", ".", "quit", "(", "1", ")", "print", "(", "address", ".", "new", "(", "args", "[", "0", "]", ")", ")" ]
Just print the result of parsing a target string.
[ "Just", "print", "the", "result", "of", "parsing", "a", "target", "string", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L315-L320
benley/butcher
butcher/main.py
build
def build(args): """Build a target and its dependencies.""" if len(args) != 1: log.error('One target required.') app.quit(1) target = address.new(args[0]) log.info('Resolved target to: %s', target) try: bb = Butcher() bb.clean() bb.load_graph(target) bb.build(target) except (gitrepo.GitError, error.BrokenGraph, error.NoSuchTargetError) as err: log.fatal(err) app.quit(1) except error.OverallBuildFailure as err: log.fatal(err) log.fatal('Error list:') [log.fatal(' [%s]: %s', e.node, e) for e in bb.failure_log] app.quit(1)
python
def build(args): """Build a target and its dependencies.""" if len(args) != 1: log.error('One target required.') app.quit(1) target = address.new(args[0]) log.info('Resolved target to: %s', target) try: bb = Butcher() bb.clean() bb.load_graph(target) bb.build(target) except (gitrepo.GitError, error.BrokenGraph, error.NoSuchTargetError) as err: log.fatal(err) app.quit(1) except error.OverallBuildFailure as err: log.fatal(err) log.fatal('Error list:') [log.fatal(' [%s]: %s', e.node, e) for e in bb.failure_log] app.quit(1)
[ "def", "build", "(", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "1", ":", "log", ".", "error", "(", "'One target required.'", ")", "app", ".", "quit", "(", "1", ")", "target", "=", "address", ".", "new", "(", "args", "[", "0", "]", ...
Build a target and its dependencies.
[ "Build", "a", "target", "and", "its", "dependencies", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L324-L348
benley/butcher
butcher/main.py
rebuild
def rebuild(args): """Rebuild a target and deps, even if it has been built and cached.""" if len(args) != 1: log.fatal('One target required.') app.quit(1) app.set_option('disable_cache_fetch', True) Butcher.options['cache_fetch'] = False build(args)
python
def rebuild(args): """Rebuild a target and deps, even if it has been built and cached.""" if len(args) != 1: log.fatal('One target required.') app.quit(1) app.set_option('disable_cache_fetch', True) Butcher.options['cache_fetch'] = False build(args)
[ "def", "rebuild", "(", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "1", ":", "log", ".", "fatal", "(", "'One target required.'", ")", "app", ".", "quit", "(", "1", ")", "app", ".", "set_option", "(", "'disable_cache_fetch'", ",", "True", "...
Rebuild a target and deps, even if it has been built and cached.
[ "Rebuild", "a", "target", "and", "deps", "even", "if", "it", "has", "been", "built", "and", "cached", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L352-L360
benley/butcher
butcher/main.py
dump
def dump(args): """Load the build graph for a target and dump it to stdout.""" if len(args) != 1: log.error('One target required.') app.quit(1) try: bb = Butcher() bb.load_graph(args[0]) except error.BrokenGraph as lolno: log.fatal(lolno) app.quit(1) print "Nodes:" pprint.pprint(bb.graph.node) print "Edges:" pprint.pprint(bb.graph.edge)
python
def dump(args): """Load the build graph for a target and dump it to stdout.""" if len(args) != 1: log.error('One target required.') app.quit(1) try: bb = Butcher() bb.load_graph(args[0]) except error.BrokenGraph as lolno: log.fatal(lolno) app.quit(1) print "Nodes:" pprint.pprint(bb.graph.node) print "Edges:" pprint.pprint(bb.graph.edge)
[ "def", "dump", "(", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "1", ":", "log", ".", "error", "(", "'One target required.'", ")", "app", ".", "quit", "(", "1", ")", "try", ":", "bb", "=", "Butcher", "(", ")", "bb", ".", "load_graph", ...
Load the build graph for a target and dump it to stdout.
[ "Load", "the", "build", "graph", "for", "a", "target", "and", "dump", "it", "to", "stdout", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L371-L386
benley/butcher
butcher/main.py
draw
def draw(args): """Load the build graph for a target and render it to an image.""" if len(args) != 2: log.error('Two arguments required: [build target] [output file]') app.quit(1) target = args[0] out = args[1] try: bb = Butcher() bb.load_graph(target) except error.BrokenGraph as lolno: log.fatal(lolno) app.quit(1) # Filter down to the target and all of its transitive dependencies. # TODO: make it possible to optionally draw the entire graph filtered_graph = bb.graph.subgraph( networkx.topological_sort(bb.graph, nbunch=[address.new(target)])) a = networkx.to_agraph(filtered_graph) a.draw(out, prog='dot') log.info('Graph written to %s', out)
python
def draw(args): """Load the build graph for a target and render it to an image.""" if len(args) != 2: log.error('Two arguments required: [build target] [output file]') app.quit(1) target = args[0] out = args[1] try: bb = Butcher() bb.load_graph(target) except error.BrokenGraph as lolno: log.fatal(lolno) app.quit(1) # Filter down to the target and all of its transitive dependencies. # TODO: make it possible to optionally draw the entire graph filtered_graph = bb.graph.subgraph( networkx.topological_sort(bb.graph, nbunch=[address.new(target)])) a = networkx.to_agraph(filtered_graph) a.draw(out, prog='dot') log.info('Graph written to %s', out)
[ "def", "draw", "(", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "2", ":", "log", ".", "error", "(", "'Two arguments required: [build target] [output file]'", ")", "app", ".", "quit", "(", "1", ")", "target", "=", "args", "[", "0", "]", "out"...
Load the build graph for a target and render it to an image.
[ "Load", "the", "build", "graph", "for", "a", "target", "and", "render", "it", "to", "an", "image", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L390-L413
benley/butcher
butcher/main.py
stub_main
def stub_main(): """setuptools blah: it still can't run a module as a script entry_point""" from google.apputils import run_script_module import butcher.main run_script_module.RunScriptModule(butcher.main)
python
def stub_main(): """setuptools blah: it still can't run a module as a script entry_point""" from google.apputils import run_script_module import butcher.main run_script_module.RunScriptModule(butcher.main)
[ "def", "stub_main", "(", ")", ":", "from", "google", ".", "apputils", "import", "run_script_module", "import", "butcher", ".", "main", "run_script_module", ".", "RunScriptModule", "(", "butcher", ".", "main", ")" ]
setuptools blah: it still can't run a module as a script entry_point
[ "setuptools", "blah", ":", "it", "still", "can", "t", "run", "a", "module", "as", "a", "script", "entry_point" ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L416-L420
benley/butcher
butcher/main.py
Butcher.setup_function
def setup_function(self): """Runs prior to the global main function.""" log.options.LogOptions.set_stderr_log_level('google:INFO') if app.get_options().debug: log.options.LogOptions.set_stderr_log_level('google:DEBUG') if not app.get_options().build_root: app.set_option('build_root', os.path.join( app.get_options().butcher_basedir, 'build')) self.buildroot = app.get_options().build_root if not os.path.exists(self.buildroot): os.makedirs(self.buildroot) if app.get_options().disable_cache_fetch: self.options['cache_fetch'] = False if app.get_options().disable_hardlinks: base.BaseBuilder.linkfiles = False
python
def setup_function(self): """Runs prior to the global main function.""" log.options.LogOptions.set_stderr_log_level('google:INFO') if app.get_options().debug: log.options.LogOptions.set_stderr_log_level('google:DEBUG') if not app.get_options().build_root: app.set_option('build_root', os.path.join( app.get_options().butcher_basedir, 'build')) self.buildroot = app.get_options().build_root if not os.path.exists(self.buildroot): os.makedirs(self.buildroot) if app.get_options().disable_cache_fetch: self.options['cache_fetch'] = False if app.get_options().disable_hardlinks: base.BaseBuilder.linkfiles = False
[ "def", "setup_function", "(", "self", ")", ":", "log", ".", "options", ".", "LogOptions", ".", "set_stderr_log_level", "(", "'google:INFO'", ")", "if", "app", ".", "get_options", "(", ")", ".", "debug", ":", "log", ".", "options", ".", "LogOptions", ".", ...
Runs prior to the global main function.
[ "Runs", "prior", "to", "the", "global", "main", "function", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L75-L89
benley/butcher
butcher/main.py
Butcher.clean
def clean(self): """Clear the contents of the build area.""" if os.path.exists(self.buildroot): log.info('Clearing the build area.') log.debug('Deleting: %s', self.buildroot) shutil.rmtree(self.buildroot) os.makedirs(self.buildroot)
python
def clean(self): """Clear the contents of the build area.""" if os.path.exists(self.buildroot): log.info('Clearing the build area.') log.debug('Deleting: %s', self.buildroot) shutil.rmtree(self.buildroot) os.makedirs(self.buildroot)
[ "def", "clean", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "buildroot", ")", ":", "log", ".", "info", "(", "'Clearing the build area.'", ")", "log", ".", "debug", "(", "'Deleting: %s'", ",", "self", ".", "buildroot...
Clear the contents of the build area.
[ "Clear", "the", "contents", "of", "the", "build", "area", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L91-L97
benley/butcher
butcher/main.py
Butcher.paths_wanted
def paths_wanted(self): """The set of paths where we expect to find missing nodes.""" return set(address.new(b, target='all') for b in self.missing_nodes)
python
def paths_wanted(self): """The set of paths where we expect to find missing nodes.""" return set(address.new(b, target='all') for b in self.missing_nodes)
[ "def", "paths_wanted", "(", "self", ")", ":", "return", "set", "(", "address", ".", "new", "(", "b", ",", "target", "=", "'all'", ")", "for", "b", "in", "self", ".", "missing_nodes", ")" ]
The set of paths where we expect to find missing nodes.
[ "The", "set", "of", "paths", "where", "we", "expect", "to", "find", "missing", "nodes", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L282-L284
benley/butcher
butcher/main.py
Butcher.missing_nodes
def missing_nodes(self): """The set of targets known as dependencies but not yet defined.""" missing = set() for target_addr, target_attrs in self.graph.node.items(): if 'target_obj' not in target_attrs: missing.add(target_addr) return missing
python
def missing_nodes(self): """The set of targets known as dependencies but not yet defined.""" missing = set() for target_addr, target_attrs in self.graph.node.items(): if 'target_obj' not in target_attrs: missing.add(target_addr) return missing
[ "def", "missing_nodes", "(", "self", ")", ":", "missing", "=", "set", "(", ")", "for", "target_addr", ",", "target_attrs", "in", "self", ".", "graph", ".", "node", ".", "items", "(", ")", ":", "if", "'target_obj'", "not", "in", "target_attrs", ":", "mi...
The set of targets known as dependencies but not yet defined.
[ "The", "set", "of", "targets", "known", "as", "dependencies", "but", "not", "yet", "defined", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L287-L293
benley/butcher
butcher/main.py
Butcher.load_buildfile
def load_buildfile(self, target): """Pull a build file from git.""" log.info('Loading: %s', target) filepath = os.path.join(target.path, app.get_options().buildfile_name) try: repo = self.repo_state.GetRepo(target.repo) return repo.get_file(filepath) except gitrepo.GitError as err: log.error('Failed loading %s: %s', target, err) raise error.BrokenGraph('Sadface.')
python
def load_buildfile(self, target): """Pull a build file from git.""" log.info('Loading: %s', target) filepath = os.path.join(target.path, app.get_options().buildfile_name) try: repo = self.repo_state.GetRepo(target.repo) return repo.get_file(filepath) except gitrepo.GitError as err: log.error('Failed loading %s: %s', target, err) raise error.BrokenGraph('Sadface.')
[ "def", "load_buildfile", "(", "self", ",", "target", ")", ":", "log", ".", "info", "(", "'Loading: %s'", ",", "target", ")", "filepath", "=", "os", ".", "path", ".", "join", "(", "target", ".", "path", ",", "app", ".", "get_options", "(", ")", ".", ...
Pull a build file from git.
[ "Pull", "a", "build", "file", "from", "git", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L295-L304
Aluriak/ACCC
accc/langspec/langspec.py
constructSpec
def constructSpec(indentation, begin_block, end_block, begin_line, end_line, begin_action, end_action, begin_condition, end_condition, logical_and, logical_or): """Return a language specification based on parameters.""" return { INDENTATION : indentation, BEG_BLOCK : begin_block, END_BLOCK : end_block, BEG_LINE : begin_line, END_LINE : end_line, BEG_ACTION : begin_action, END_ACTION : end_action, BEG_CONDITION : begin_condition, END_CONDITION : end_condition, LOGICAL_AND : logical_and, LOGICAL_OR : logical_or }
python
def constructSpec(indentation, begin_block, end_block, begin_line, end_line, begin_action, end_action, begin_condition, end_condition, logical_and, logical_or): """Return a language specification based on parameters.""" return { INDENTATION : indentation, BEG_BLOCK : begin_block, END_BLOCK : end_block, BEG_LINE : begin_line, END_LINE : end_line, BEG_ACTION : begin_action, END_ACTION : end_action, BEG_CONDITION : begin_condition, END_CONDITION : end_condition, LOGICAL_AND : logical_and, LOGICAL_OR : logical_or }
[ "def", "constructSpec", "(", "indentation", ",", "begin_block", ",", "end_block", ",", "begin_line", ",", "end_line", ",", "begin_action", ",", "end_action", ",", "begin_condition", ",", "end_condition", ",", "logical_and", ",", "logical_or", ")", ":", "return", ...
Return a language specification based on parameters.
[ "Return", "a", "language", "specification", "based", "on", "parameters", "." ]
train
https://github.com/Aluriak/ACCC/blob/9092f985bef7ed784264c86bc19c980f4ce2309f/accc/langspec/langspec.py#L40-L57
Aluriak/ACCC
accc/langspec/langspec.py
translated
def translated(structure, values, lang_spec): """Return code associated to given structure and values, translate with given language specification.""" # LANGUAGE SPECS indentation = '\t' endline = '\n' object_code = "" stack = [] # define shortcuts to behavior push = lambda x: stack.append(x) pop = lambda : stack.pop() last = lambda : stack[-1] if len(stack) > 0 else ' ' def indented_code(s, level, end): return lang_spec[INDENTATION]*level + s + end # recreate python structure, and replace type by value level = 0 CONDITIONS = [LEXEM_TYPE_PREDICAT, LEXEM_TYPE_CONDITION] ACTION = LEXEM_TYPE_ACTION DOWNLEVEL = LEXEM_TYPE_DOWNLEVEL for lexem_type in structure: if lexem_type is ACTION: # place previous conditions if necessary if last() in CONDITIONS: # construct conditions lines value, values = values[0:len(stack)], values[len(stack):] object_code += (indented_code(lang_spec[BEG_CONDITION] + lang_spec[LOGICAL_AND].join(value) + lang_spec[END_CONDITION], level, lang_spec[END_LINE] )) # if provided, print the begin block token on a new line if len(lang_spec[BEG_BLOCK]) > 0: object_code += indented_code( lang_spec[BEG_BLOCK], level, lang_spec[END_LINE] ) stack = [] level += 1 # and place the action object_code += indented_code( lang_spec[BEG_ACTION] + values[0], level, lang_spec[END_ACTION]+lang_spec[END_LINE] ) values = values[1:] elif lexem_type in CONDITIONS: push(lexem_type) elif lexem_type is DOWNLEVEL: if last() not in CONDITIONS: # down level, and add a END_BLOCK only if needed level -= 1 if level >= 0: object_code += indented_code( lang_spec[END_BLOCK], level, lang_spec[END_LINE] ) else: level = 0 # add END_BLOCK while needed for reach level 0 while level > 0: level -= 1 if level >= 0: object_code += indented_code( lang_spec[END_BLOCK], level, lang_spec[END_LINE] ) else: level = 0 # Finished ! return object_code
python
def translated(structure, values, lang_spec): """Return code associated to given structure and values, translate with given language specification.""" # LANGUAGE SPECS indentation = '\t' endline = '\n' object_code = "" stack = [] # define shortcuts to behavior push = lambda x: stack.append(x) pop = lambda : stack.pop() last = lambda : stack[-1] if len(stack) > 0 else ' ' def indented_code(s, level, end): return lang_spec[INDENTATION]*level + s + end # recreate python structure, and replace type by value level = 0 CONDITIONS = [LEXEM_TYPE_PREDICAT, LEXEM_TYPE_CONDITION] ACTION = LEXEM_TYPE_ACTION DOWNLEVEL = LEXEM_TYPE_DOWNLEVEL for lexem_type in structure: if lexem_type is ACTION: # place previous conditions if necessary if last() in CONDITIONS: # construct conditions lines value, values = values[0:len(stack)], values[len(stack):] object_code += (indented_code(lang_spec[BEG_CONDITION] + lang_spec[LOGICAL_AND].join(value) + lang_spec[END_CONDITION], level, lang_spec[END_LINE] )) # if provided, print the begin block token on a new line if len(lang_spec[BEG_BLOCK]) > 0: object_code += indented_code( lang_spec[BEG_BLOCK], level, lang_spec[END_LINE] ) stack = [] level += 1 # and place the action object_code += indented_code( lang_spec[BEG_ACTION] + values[0], level, lang_spec[END_ACTION]+lang_spec[END_LINE] ) values = values[1:] elif lexem_type in CONDITIONS: push(lexem_type) elif lexem_type is DOWNLEVEL: if last() not in CONDITIONS: # down level, and add a END_BLOCK only if needed level -= 1 if level >= 0: object_code += indented_code( lang_spec[END_BLOCK], level, lang_spec[END_LINE] ) else: level = 0 # add END_BLOCK while needed for reach level 0 while level > 0: level -= 1 if level >= 0: object_code += indented_code( lang_spec[END_BLOCK], level, lang_spec[END_LINE] ) else: level = 0 # Finished ! return object_code
[ "def", "translated", "(", "structure", ",", "values", ",", "lang_spec", ")", ":", "# LANGUAGE SPECS", "indentation", "=", "'\\t'", "endline", "=", "'\\n'", "object_code", "=", "\"\"", "stack", "=", "[", "]", "# define shortcuts to behavior", "push", "=", "lambda...
Return code associated to given structure and values, translate with given language specification.
[ "Return", "code", "associated", "to", "given", "structure", "and", "values", "translate", "with", "given", "language", "specification", "." ]
train
https://github.com/Aluriak/ACCC/blob/9092f985bef7ed784264c86bc19c980f4ce2309f/accc/langspec/langspec.py#L63-L138
Aluriak/ACCC
accc/langspec/langspec.py
cpp_spec
def cpp_spec(): """C++ specification, provided for example, and java compatible.""" return { INDENTATION : '\t', BEG_BLOCK : '{', END_BLOCK : '}', BEG_LINE : '', END_LINE : '\n', BEG_ACTION : '', END_ACTION : ';', BEG_CONDITION : 'if(', END_CONDITION : ')', LOGICAL_AND : ' && ', LOGICAL_OR : ' || ' }
python
def cpp_spec(): """C++ specification, provided for example, and java compatible.""" return { INDENTATION : '\t', BEG_BLOCK : '{', END_BLOCK : '}', BEG_LINE : '', END_LINE : '\n', BEG_ACTION : '', END_ACTION : ';', BEG_CONDITION : 'if(', END_CONDITION : ')', LOGICAL_AND : ' && ', LOGICAL_OR : ' || ' }
[ "def", "cpp_spec", "(", ")", ":", "return", "{", "INDENTATION", ":", "'\\t'", ",", "BEG_BLOCK", ":", "'{'", ",", "END_BLOCK", ":", "'}'", ",", "BEG_LINE", ":", "''", ",", "END_LINE", ":", "'\\n'", ",", "BEG_ACTION", ":", "''", ",", "END_ACTION", ":", ...
C++ specification, provided for example, and java compatible.
[ "C", "++", "specification", "provided", "for", "example", "and", "java", "compatible", "." ]
train
https://github.com/Aluriak/ACCC/blob/9092f985bef7ed784264c86bc19c980f4ce2309f/accc/langspec/langspec.py#L149-L163
azraq27/neural
neural/driver.py
driver_send
def driver_send(command,hostname=None,wait=0.2): '''Send a command (or ``list`` of commands) to AFNI at ``hostname`` (defaults to local host) Requires plugouts enabled (open afni with ``-yesplugouts`` or set ``AFNI_YESPLUGOUTS = YES`` in ``.afnirc``) If ``wait`` is not ``None``, will automatically sleep ``wait`` seconds after sending the command (to make sure it took effect)''' cmd = ['plugout_drive'] if hostname: cmd += ['-host',hostname] if isinstance(command,basestring): command = [command] cmd += [['-com',x] for x in command] + ['-quit'] o = nl.run(cmd,quiet=None,stderr=None) if wait!=None: time.sleep(wait)
python
def driver_send(command,hostname=None,wait=0.2): '''Send a command (or ``list`` of commands) to AFNI at ``hostname`` (defaults to local host) Requires plugouts enabled (open afni with ``-yesplugouts`` or set ``AFNI_YESPLUGOUTS = YES`` in ``.afnirc``) If ``wait`` is not ``None``, will automatically sleep ``wait`` seconds after sending the command (to make sure it took effect)''' cmd = ['plugout_drive'] if hostname: cmd += ['-host',hostname] if isinstance(command,basestring): command = [command] cmd += [['-com',x] for x in command] + ['-quit'] o = nl.run(cmd,quiet=None,stderr=None) if wait!=None: time.sleep(wait)
[ "def", "driver_send", "(", "command", ",", "hostname", "=", "None", ",", "wait", "=", "0.2", ")", ":", "cmd", "=", "[", "'plugout_drive'", "]", "if", "hostname", ":", "cmd", "+=", "[", "'-host'", ",", "hostname", "]", "if", "isinstance", "(", "command"...
Send a command (or ``list`` of commands) to AFNI at ``hostname`` (defaults to local host) Requires plugouts enabled (open afni with ``-yesplugouts`` or set ``AFNI_YESPLUGOUTS = YES`` in ``.afnirc``) If ``wait`` is not ``None``, will automatically sleep ``wait`` seconds after sending the command (to make sure it took effect)
[ "Send", "a", "command", "(", "or", "list", "of", "commands", ")", "to", "AFNI", "at", "hostname", "(", "defaults", "to", "local", "host", ")", "Requires", "plugouts", "enabled", "(", "open", "afni", "with", "-", "yesplugouts", "or", "set", "AFNI_YESPLUGOUT...
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/driver.py#L5-L17
azraq27/neural
neural/driver.py
save_image
def save_image(filename,view='axial',type='png',hostname=None): '''Save currently open AFNI view ``view`` to ``filename`` using ``type`` (``png`` or ``jpeg``)''' driver_send("SAVE_%s %simage %s" % (type.upper(),view.lower(),filename),hostname=hostname)
python
def save_image(filename,view='axial',type='png',hostname=None): '''Save currently open AFNI view ``view`` to ``filename`` using ``type`` (``png`` or ``jpeg``)''' driver_send("SAVE_%s %simage %s" % (type.upper(),view.lower(),filename),hostname=hostname)
[ "def", "save_image", "(", "filename", ",", "view", "=", "'axial'", ",", "type", "=", "'png'", ",", "hostname", "=", "None", ")", ":", "driver_send", "(", "\"SAVE_%s %simage %s\"", "%", "(", "type", ".", "upper", "(", ")", ",", "view", ".", "lower", "("...
Save currently open AFNI view ``view`` to ``filename`` using ``type`` (``png`` or ``jpeg``)
[ "Save", "currently", "open", "AFNI", "view", "view", "to", "filename", "using", "type", "(", "png", "or", "jpeg", ")" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/driver.py#L19-L21
azraq27/neural
neural/driver.py
set_thresh
def set_thresh(thresh,p=False,hostname=None): '''Sets the level of the threshold slider. If ``p==True`` will be interpreted as a _p_-value''' driver_send("SET_THRESHNEW %s *%s" % (str(thresh),"p" if p else ""),hostname=hostname)
python
def set_thresh(thresh,p=False,hostname=None): '''Sets the level of the threshold slider. If ``p==True`` will be interpreted as a _p_-value''' driver_send("SET_THRESHNEW %s *%s" % (str(thresh),"p" if p else ""),hostname=hostname)
[ "def", "set_thresh", "(", "thresh", ",", "p", "=", "False", ",", "hostname", "=", "None", ")", ":", "driver_send", "(", "\"SET_THRESHNEW %s *%s\"", "%", "(", "str", "(", "thresh", ")", ",", "\"p\"", "if", "p", "else", "\"\"", ")", ",", "hostname", "=",...
Sets the level of the threshold slider. If ``p==True`` will be interpreted as a _p_-value
[ "Sets", "the", "level", "of", "the", "threshold", "slider", ".", "If", "p", "==", "True", "will", "be", "interpreted", "as", "a", "_p_", "-", "value" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/driver.py#L23-L26
PsypherPunk/django-annotator
annotator/views.py
AnnotationViewSet.search
def search(self, _): """ Implements the `search <http://docs.annotatorjs.org/en/v1.2.x/storage.html#search>`_ endpoint. We rely on the behaviour of the ``filter_backends`` to manage the actual filtering of search results. :param _: :class:`rest_framework.request.Request` object—ignored here as we rely on the ``filter_backends``. :return: filtered :class:`rest_framework.response.Response`. """ queryset = super(AnnotationViewSet, self).filter_queryset( self.get_queryset()) serializer = self.get_serializer(queryset, many=True) return Response({ "total": len(serializer.data), "rows": serializer.data })
python
def search(self, _): """ Implements the `search <http://docs.annotatorjs.org/en/v1.2.x/storage.html#search>`_ endpoint. We rely on the behaviour of the ``filter_backends`` to manage the actual filtering of search results. :param _: :class:`rest_framework.request.Request` object—ignored here as we rely on the ``filter_backends``. :return: filtered :class:`rest_framework.response.Response`. """ queryset = super(AnnotationViewSet, self).filter_queryset( self.get_queryset()) serializer = self.get_serializer(queryset, many=True) return Response({ "total": len(serializer.data), "rows": serializer.data })
[ "def", "search", "(", "self", ",", "_", ")", ":", "queryset", "=", "super", "(", "AnnotationViewSet", ",", "self", ")", ".", "filter_queryset", "(", "self", ".", "get_queryset", "(", ")", ")", "serializer", "=", "self", ".", "get_serializer", "(", "query...
Implements the `search <http://docs.annotatorjs.org/en/v1.2.x/storage.html#search>`_ endpoint. We rely on the behaviour of the ``filter_backends`` to manage the actual filtering of search results. :param _: :class:`rest_framework.request.Request` object—ignored here as we rely on the ``filter_backends``. :return: filtered :class:`rest_framework.response.Response`.
[ "Implements", "the", "search", "<http", ":", "//", "docs", ".", "annotatorjs", ".", "org", "/", "en", "/", "v1", ".", "2", ".", "x", "/", "storage", ".", "html#search", ">", "_", "endpoint", "." ]
train
https://github.com/PsypherPunk/django-annotator/blob/7c27e8569672255dc074a9bc3f5b57a776bc52b7/annotator/views.py#L39-L61
PsypherPunk/django-annotator
annotator/views.py
AnnotationViewSet.get_success_headers
def get_success_headers(self, data): """ As per the *Annotator* documentation regarding the `create <http://docs.annotatorjs.org/en/v1.2.x/storage.html#create>`_ and `update <http://docs.annotatorjs.org/en/v1.2.x/storage.html#update>`_ endpoints, we must return an absolute URL in the ``Location`` header. :param data: serialized object. :return: :class:`dict` of HTTP headers. """ headers = super(AnnotationViewSet, self).get_success_headers(data) url = urlresolvers.reverse("annotations-detail", kwargs={"pk": data["id"]}) headers.update({"Location": self.request.build_absolute_uri(url)}) return headers
python
def get_success_headers(self, data): """ As per the *Annotator* documentation regarding the `create <http://docs.annotatorjs.org/en/v1.2.x/storage.html#create>`_ and `update <http://docs.annotatorjs.org/en/v1.2.x/storage.html#update>`_ endpoints, we must return an absolute URL in the ``Location`` header. :param data: serialized object. :return: :class:`dict` of HTTP headers. """ headers = super(AnnotationViewSet, self).get_success_headers(data) url = urlresolvers.reverse("annotations-detail", kwargs={"pk": data["id"]}) headers.update({"Location": self.request.build_absolute_uri(url)}) return headers
[ "def", "get_success_headers", "(", "self", ",", "data", ")", ":", "headers", "=", "super", "(", "AnnotationViewSet", ",", "self", ")", ".", "get_success_headers", "(", "data", ")", "url", "=", "urlresolvers", ".", "reverse", "(", "\"annotations-detail\"", ",",...
As per the *Annotator* documentation regarding the `create <http://docs.annotatorjs.org/en/v1.2.x/storage.html#create>`_ and `update <http://docs.annotatorjs.org/en/v1.2.x/storage.html#update>`_ endpoints, we must return an absolute URL in the ``Location`` header. :param data: serialized object. :return: :class:`dict` of HTTP headers.
[ "As", "per", "the", "*", "Annotator", "*", "documentation", "regarding", "the", "create", "<http", ":", "//", "docs", ".", "annotatorjs", ".", "org", "/", "en", "/", "v1", ".", "2", ".", "x", "/", "storage", ".", "html#create", ">", "_", "and", "upda...
train
https://github.com/PsypherPunk/django-annotator/blob/7c27e8569672255dc074a9bc3f5b57a776bc52b7/annotator/views.py#L63-L82
PsypherPunk/django-annotator
annotator/views.py
AnnotationViewSet.create
def create(self, request, *args, **kwargs): """ See the *Annotator* documentation regarding the `create <http://docs.annotatorjs.org/en/v1.2.x/storage.html#create>`_ endpoint. :param request: incoming :class:`rest_framework.request.Request`. :return: 303 :class:`rest_framework.response.Response`. """ response = super(AnnotationViewSet, self).create(request, *args, **kwargs) response.data = None response.status_code = status.HTTP_303_SEE_OTHER return response
python
def create(self, request, *args, **kwargs): """ See the *Annotator* documentation regarding the `create <http://docs.annotatorjs.org/en/v1.2.x/storage.html#create>`_ endpoint. :param request: incoming :class:`rest_framework.request.Request`. :return: 303 :class:`rest_framework.response.Response`. """ response = super(AnnotationViewSet, self).create(request, *args, **kwargs) response.data = None response.status_code = status.HTTP_303_SEE_OTHER return response
[ "def", "create", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "super", "(", "AnnotationViewSet", ",", "self", ")", ".", "create", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ...
See the *Annotator* documentation regarding the `create <http://docs.annotatorjs.org/en/v1.2.x/storage.html#create>`_ endpoint. :param request: incoming :class:`rest_framework.request.Request`. :return: 303 :class:`rest_framework.response.Response`.
[ "See", "the", "*", "Annotator", "*", "documentation", "regarding", "the", "create", "<http", ":", "//", "docs", ".", "annotatorjs", ".", "org", "/", "en", "/", "v1", ".", "2", ".", "x", "/", "storage", ".", "html#create", ">", "_", "endpoint", "." ]
train
https://github.com/PsypherPunk/django-annotator/blob/7c27e8569672255dc074a9bc3f5b57a776bc52b7/annotator/views.py#L84-L100
PsypherPunk/django-annotator
annotator/views.py
AnnotationViewSet.update
def update(self, request, *args, **kwargs): """ See the *Annotator* documentation regarding the `update <http://docs.annotatorjs.org/en/v1.2.x/storage.html#update>`_ endpoint. :param request: incoming :class:`rest_framework.request.Request`. :return: 303 :class:`rest_framework.response.Response`. """ response = super(AnnotationViewSet, self).update(request, *args, **kwargs) for h, v in self.get_success_headers(response.data).items(): response[h] = v response.data = None response.status_code = status.HTTP_303_SEE_OTHER return response
python
def update(self, request, *args, **kwargs): """ See the *Annotator* documentation regarding the `update <http://docs.annotatorjs.org/en/v1.2.x/storage.html#update>`_ endpoint. :param request: incoming :class:`rest_framework.request.Request`. :return: 303 :class:`rest_framework.response.Response`. """ response = super(AnnotationViewSet, self).update(request, *args, **kwargs) for h, v in self.get_success_headers(response.data).items(): response[h] = v response.data = None response.status_code = status.HTTP_303_SEE_OTHER return response
[ "def", "update", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "super", "(", "AnnotationViewSet", ",", "self", ")", ".", "update", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ...
See the *Annotator* documentation regarding the `update <http://docs.annotatorjs.org/en/v1.2.x/storage.html#update>`_ endpoint. :param request: incoming :class:`rest_framework.request.Request`. :return: 303 :class:`rest_framework.response.Response`.
[ "See", "the", "*", "Annotator", "*", "documentation", "regarding", "the", "update", "<http", ":", "//", "docs", ".", "annotatorjs", ".", "org", "/", "en", "/", "v1", ".", "2", ".", "x", "/", "storage", ".", "html#update", ">", "_", "endpoint", "." ]
train
https://github.com/PsypherPunk/django-annotator/blob/7c27e8569672255dc074a9bc3f5b57a776bc52b7/annotator/views.py#L102-L120
mixmastamyk/fr
fr/windows.py
get_diskinfo
def get_diskinfo(opts, show_all=False, local_only=False): ''' Returns a list holding the current disk info, stats divided by the ouptut unit. ''' disks = [] outunit = opts.outunit for drive in get_drives(): drive += ':\\' disk = DiskInfo(dev=drive) try: usage = get_fs_usage(drive) except WindowsError: # disk not ready, request aborted, etc. if show_all: usage = _diskusage(0, 0, 0) else: continue disk.ocap = usage.total disk.cap = usage.total / outunit disk.used = usage.used / outunit disk.free = usage.free / outunit disk.label = get_vol_info(drive).name if usage.total: disk.pcnt = float(usage.used) / usage.total * 100 else: disk.pcnt = 0 disk.mntp = '' disk.ismntd = True # TODO needs work # type is not working on Win7 under VirtualBox? dtint, dtstr = get_drive_type(drive) setattr(disk, *_drive_type_result[dtint]) disk.rw = os.access(drive, os.W_OK) # doesn't work on optical if usage.total: # this not giving correct result on Win7 RTM either disk.rw = stat.S_IMODE(os.stat(drive).st_mode) & stat.S_IWRITE else: disk.rw = False disks.append(disk) if opts.debug: for disk in disks: print(disk.dev, disk, '\n') return disks
python
def get_diskinfo(opts, show_all=False, local_only=False): ''' Returns a list holding the current disk info, stats divided by the ouptut unit. ''' disks = [] outunit = opts.outunit for drive in get_drives(): drive += ':\\' disk = DiskInfo(dev=drive) try: usage = get_fs_usage(drive) except WindowsError: # disk not ready, request aborted, etc. if show_all: usage = _diskusage(0, 0, 0) else: continue disk.ocap = usage.total disk.cap = usage.total / outunit disk.used = usage.used / outunit disk.free = usage.free / outunit disk.label = get_vol_info(drive).name if usage.total: disk.pcnt = float(usage.used) / usage.total * 100 else: disk.pcnt = 0 disk.mntp = '' disk.ismntd = True # TODO needs work # type is not working on Win7 under VirtualBox? dtint, dtstr = get_drive_type(drive) setattr(disk, *_drive_type_result[dtint]) disk.rw = os.access(drive, os.W_OK) # doesn't work on optical if usage.total: # this not giving correct result on Win7 RTM either disk.rw = stat.S_IMODE(os.stat(drive).st_mode) & stat.S_IWRITE else: disk.rw = False disks.append(disk) if opts.debug: for disk in disks: print(disk.dev, disk, '\n') return disks
[ "def", "get_diskinfo", "(", "opts", ",", "show_all", "=", "False", ",", "local_only", "=", "False", ")", ":", "disks", "=", "[", "]", "outunit", "=", "opts", ".", "outunit", "for", "drive", "in", "get_drives", "(", ")", ":", "drive", "+=", "':\\\\'", ...
Returns a list holding the current disk info, stats divided by the ouptut unit.
[ "Returns", "a", "list", "holding", "the", "current", "disk", "info", "stats", "divided", "by", "the", "ouptut", "unit", "." ]
train
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/windows.py#L102-L145
mixmastamyk/fr
fr/windows.py
get_meminfo
def get_meminfo(opts): ''' Returns a dictionary holding the current memory info, divided by the ouptut unit. ''' meminfo = MemInfo() outunit = opts.outunit mstat = get_mem_info() # from winstats pinf = get_perf_info() try: pgpcnt = get_perf_data(r'\Paging File(_Total)\% Usage', 'double')[0] / 100 except WindowsError: pgpcnt = 0 totl = mstat.TotalPhys meminfo.memtotal = totl / float(outunit) used = totl * mstat.MemoryLoad / 100.0 # percent, more reliable meminfo.used = used / float(outunit) left = totl - used # Cached cache = pinf.SystemCacheBytes if cache > left and version >= win7ver: # Win7 RTM bug :/ this cache number is bogus free = get_perf_data(r'\Memory\Free & Zero Page List Bytes', 'long')[0] cache = left - free meminfo.memfree = free / float(outunit) else: meminfo.memfree = (totl - used - cache) / float(outunit) meminfo.buffers = 0 meminfo.cached = cache / float(outunit) # SWAP these numbers are actually commit charge, not swap; fix # should not contain RAM :/ swpt = abs(mstat.TotalPageFile - totl) # these nums aren't quite right either, use perfmon instead :/ swpu = swpt * pgpcnt swpf = swpt - swpu meminfo.swaptotal = swpt / float(outunit) meminfo.swapfree = swpf / float(outunit) meminfo.swapused = swpu / float(outunit) meminfo.swapcached = 0 # A linux stat for compat if opts.debug: import locale fmt = lambda val: locale.format('%d', val, True) print() print('TotalPhys:', fmt(totl)) print('AvailPhys:', fmt(mstat.AvailPhys)) print('MemoryLoad:', fmt(mstat.MemoryLoad)) print() print('used:', fmt(used)) print('left:', fmt(left)) if 'free' in locals(): print('PDH Free:', fmt(free)) print('SystemCacheBytes:', fmt(pinf.SystemCacheBytes)) print() print('TotalPageFile:', fmt(mstat.TotalPageFile)) print('AvailPageFile:', fmt(mstat.AvailPageFile)) print('TotalPageFile fixed:', fmt(swpt)) print('AvailPageFile fixed:', fmt(swpf)) return meminfo
python
def get_meminfo(opts): ''' Returns a dictionary holding the current memory info, divided by the ouptut unit. ''' meminfo = MemInfo() outunit = opts.outunit mstat = get_mem_info() # from winstats pinf = get_perf_info() try: pgpcnt = get_perf_data(r'\Paging File(_Total)\% Usage', 'double')[0] / 100 except WindowsError: pgpcnt = 0 totl = mstat.TotalPhys meminfo.memtotal = totl / float(outunit) used = totl * mstat.MemoryLoad / 100.0 # percent, more reliable meminfo.used = used / float(outunit) left = totl - used # Cached cache = pinf.SystemCacheBytes if cache > left and version >= win7ver: # Win7 RTM bug :/ this cache number is bogus free = get_perf_data(r'\Memory\Free & Zero Page List Bytes', 'long')[0] cache = left - free meminfo.memfree = free / float(outunit) else: meminfo.memfree = (totl - used - cache) / float(outunit) meminfo.buffers = 0 meminfo.cached = cache / float(outunit) # SWAP these numbers are actually commit charge, not swap; fix # should not contain RAM :/ swpt = abs(mstat.TotalPageFile - totl) # these nums aren't quite right either, use perfmon instead :/ swpu = swpt * pgpcnt swpf = swpt - swpu meminfo.swaptotal = swpt / float(outunit) meminfo.swapfree = swpf / float(outunit) meminfo.swapused = swpu / float(outunit) meminfo.swapcached = 0 # A linux stat for compat if opts.debug: import locale fmt = lambda val: locale.format('%d', val, True) print() print('TotalPhys:', fmt(totl)) print('AvailPhys:', fmt(mstat.AvailPhys)) print('MemoryLoad:', fmt(mstat.MemoryLoad)) print() print('used:', fmt(used)) print('left:', fmt(left)) if 'free' in locals(): print('PDH Free:', fmt(free)) print('SystemCacheBytes:', fmt(pinf.SystemCacheBytes)) print() print('TotalPageFile:', fmt(mstat.TotalPageFile)) print('AvailPageFile:', fmt(mstat.AvailPageFile)) print('TotalPageFile fixed:', fmt(swpt)) print('AvailPageFile fixed:', fmt(swpf)) return meminfo
[ "def", "get_meminfo", "(", "opts", ")", ":", "meminfo", "=", "MemInfo", "(", ")", "outunit", "=", "opts", ".", "outunit", "mstat", "=", "get_mem_info", "(", ")", "# from winstats", "pinf", "=", "get_perf_info", "(", ")", "try", ":", "pgpcnt", "=", "get_p...
Returns a dictionary holding the current memory info, divided by the ouptut unit.
[ "Returns", "a", "dictionary", "holding", "the", "current", "memory", "info", "divided", "by", "the", "ouptut", "unit", "." ]
train
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/windows.py#L148-L212
duniter/duniter-python-api
duniterpy/api/endpoint.py
endpoint
def endpoint(value: Any) -> Any: """ Convert a endpoint string to the corresponding Endpoint instance type :param value: Endpoint string or subclass :return: """ if issubclass(type(value), Endpoint): return value elif isinstance(value, str): for api, cls in MANAGED_API.items(): if value.startswith(api + " "): return cls.from_inline(value) return UnknownEndpoint.from_inline(value) else: raise TypeError("Cannot convert {0} to endpoint".format(value))
python
def endpoint(value: Any) -> Any: """ Convert a endpoint string to the corresponding Endpoint instance type :param value: Endpoint string or subclass :return: """ if issubclass(type(value), Endpoint): return value elif isinstance(value, str): for api, cls in MANAGED_API.items(): if value.startswith(api + " "): return cls.from_inline(value) return UnknownEndpoint.from_inline(value) else: raise TypeError("Cannot convert {0} to endpoint".format(value))
[ "def", "endpoint", "(", "value", ":", "Any", ")", "->", "Any", ":", "if", "issubclass", "(", "type", "(", "value", ")", ",", "Endpoint", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "for", "api", ",", "cls"...
Convert a endpoint string to the corresponding Endpoint instance type :param value: Endpoint string or subclass :return:
[ "Convert", "a", "endpoint", "string", "to", "the", "corresponding", "Endpoint", "instance", "type" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L547-L562
duniter/duniter-python-api
duniterpy/api/endpoint.py
UnknownEndpoint.from_inline
def from_inline(cls: Type[UnknownEndpointType], inline: str) -> UnknownEndpointType: """ Return UnknownEndpoint instance from endpoint string :param inline: Endpoint string :return: """ try: api = inline.split()[0] properties = inline.split()[1:] return cls(api, properties) except IndexError: raise MalformedDocumentError(inline)
python
def from_inline(cls: Type[UnknownEndpointType], inline: str) -> UnknownEndpointType: """ Return UnknownEndpoint instance from endpoint string :param inline: Endpoint string :return: """ try: api = inline.split()[0] properties = inline.split()[1:] return cls(api, properties) except IndexError: raise MalformedDocumentError(inline)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "UnknownEndpointType", "]", ",", "inline", ":", "str", ")", "->", "UnknownEndpointType", ":", "try", ":", "api", "=", "inline", ".", "split", "(", ")", "[", "0", "]", "properties", "=", "inline", ".",...
Return UnknownEndpoint instance from endpoint string :param inline: Endpoint string :return:
[ "Return", "UnknownEndpoint", "instance", "from", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L72-L84
duniter/duniter-python-api
duniterpy/api/endpoint.py
UnknownEndpoint.inline
def inline(self) -> str: """ Return endpoint string :return: """ doc = self.api for p in self.properties: doc += " {0}".format(p) return doc
python
def inline(self) -> str: """ Return endpoint string :return: """ doc = self.api for p in self.properties: doc += " {0}".format(p) return doc
[ "def", "inline", "(", "self", ")", "->", "str", ":", "doc", "=", "self", ".", "api", "for", "p", "in", "self", ".", "properties", ":", "doc", "+=", "\" {0}\"", ".", "format", "(", "p", ")", "return", "doc" ]
Return endpoint string :return:
[ "Return", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L86-L95
duniter/duniter-python-api
duniterpy/api/endpoint.py
BMAEndpoint.from_inline
def from_inline(cls: Type[BMAEndpointType], inline: str) -> BMAEndpointType: """ Return BMAEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = BMAEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(BMAEndpoint.API) server = m.group(1) ipv4 = m.group(2) ipv6 = m.group(3) port = int(m.group(4)) return cls(server, ipv4, ipv6, port)
python
def from_inline(cls: Type[BMAEndpointType], inline: str) -> BMAEndpointType: """ Return BMAEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = BMAEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(BMAEndpoint.API) server = m.group(1) ipv4 = m.group(2) ipv6 = m.group(3) port = int(m.group(4)) return cls(server, ipv4, ipv6, port)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "BMAEndpointType", "]", ",", "inline", ":", "str", ")", "->", "BMAEndpointType", ":", "m", "=", "BMAEndpoint", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "m", "is", "None", ":", "raise"...
Return BMAEndpoint instance from endpoint string :param inline: Endpoint string :return:
[ "Return", "BMAEndpoint", "instance", "from", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L159-L173
duniter/duniter-python-api
duniterpy/api/endpoint.py
BMAEndpoint.inline
def inline(self) -> str: """ Return endpoint string :return: """ return BMAEndpoint.API + "{DNS}{IPv4}{IPv6}{PORT}" \ .format(DNS=(" {0}".format(self.server) if self.server else ""), IPv4=(" {0}".format(self.ipv4) if self.ipv4 else ""), IPv6=(" {0}".format(self.ipv6) if self.ipv6 else ""), PORT=(" {0}".format(self.port) if self.port else ""))
python
def inline(self) -> str: """ Return endpoint string :return: """ return BMAEndpoint.API + "{DNS}{IPv4}{IPv6}{PORT}" \ .format(DNS=(" {0}".format(self.server) if self.server else ""), IPv4=(" {0}".format(self.ipv4) if self.ipv4 else ""), IPv6=(" {0}".format(self.ipv6) if self.ipv6 else ""), PORT=(" {0}".format(self.port) if self.port else ""))
[ "def", "inline", "(", "self", ")", "->", "str", ":", "return", "BMAEndpoint", ".", "API", "+", "\"{DNS}{IPv4}{IPv6}{PORT}\"", ".", "format", "(", "DNS", "=", "(", "\" {0}\"", ".", "format", "(", "self", ".", "server", ")", "if", "self", ".", "server", ...
Return endpoint string :return:
[ "Return", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L175-L185
duniter/duniter-python-api
duniterpy/api/endpoint.py
SecuredBMAEndpoint.from_inline
def from_inline(cls: Type[SecuredBMAEndpointType], inline: str) -> SecuredBMAEndpointType: """ Return SecuredBMAEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = SecuredBMAEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(SecuredBMAEndpoint.API) server = m.group(1) ipv4 = m.group(2) ipv6 = m.group(3) port = int(m.group(4)) path = m.group(5) if not path: path = "" return cls(server, ipv4, ipv6, port, path)
python
def from_inline(cls: Type[SecuredBMAEndpointType], inline: str) -> SecuredBMAEndpointType: """ Return SecuredBMAEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = SecuredBMAEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(SecuredBMAEndpoint.API) server = m.group(1) ipv4 = m.group(2) ipv6 = m.group(3) port = int(m.group(4)) path = m.group(5) if not path: path = "" return cls(server, ipv4, ipv6, port, path)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "SecuredBMAEndpointType", "]", ",", "inline", ":", "str", ")", "->", "SecuredBMAEndpointType", ":", "m", "=", "SecuredBMAEndpoint", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "m", "is", "No...
Return SecuredBMAEndpoint instance from endpoint string :param inline: Endpoint string :return:
[ "Return", "SecuredBMAEndpoint", "instance", "from", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L242-L259
duniter/duniter-python-api
duniterpy/api/endpoint.py
SecuredBMAEndpoint.inline
def inline(self) -> str: """ Return endpoint string :return: """ inlined = [str(info) for info in (self.server, self.ipv4, self.ipv6, self.port, self.path) if info] return SecuredBMAEndpoint.API + " " + " ".join(inlined)
python
def inline(self) -> str: """ Return endpoint string :return: """ inlined = [str(info) for info in (self.server, self.ipv4, self.ipv6, self.port, self.path) if info] return SecuredBMAEndpoint.API + " " + " ".join(inlined)
[ "def", "inline", "(", "self", ")", "->", "str", ":", "inlined", "=", "[", "str", "(", "info", ")", "for", "info", "in", "(", "self", ".", "server", ",", "self", ".", "ipv4", ",", "self", ".", "ipv6", ",", "self", ".", "port", ",", "self", ".", ...
Return endpoint string :return:
[ "Return", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L261-L268
duniter/duniter-python-api
duniterpy/api/endpoint.py
SecuredBMAEndpoint.conn_handler
def conn_handler(self, session: ClientSession, proxy: str = None) -> ConnectionHandler: """ Return connection handler instance for the endpoint :param session: AIOHTTP client session instance :param proxy: Proxy url :return: """ if self.server: return ConnectionHandler("https", "wss", self.server, self.port, self.path, session, proxy) elif self.ipv6: return ConnectionHandler("https", "wss", "[{0}]".format(self.ipv6), self.port, self.path, session, proxy) return ConnectionHandler("https", "wss", self.ipv4, self.port, self.path, session, proxy)
python
def conn_handler(self, session: ClientSession, proxy: str = None) -> ConnectionHandler: """ Return connection handler instance for the endpoint :param session: AIOHTTP client session instance :param proxy: Proxy url :return: """ if self.server: return ConnectionHandler("https", "wss", self.server, self.port, self.path, session, proxy) elif self.ipv6: return ConnectionHandler("https", "wss", "[{0}]".format(self.ipv6), self.port, self.path, session, proxy) return ConnectionHandler("https", "wss", self.ipv4, self.port, self.path, session, proxy)
[ "def", "conn_handler", "(", "self", ",", "session", ":", "ClientSession", ",", "proxy", ":", "str", "=", "None", ")", "->", "ConnectionHandler", ":", "if", "self", ".", "server", ":", "return", "ConnectionHandler", "(", "\"https\"", ",", "\"wss\"", ",", "s...
Return connection handler instance for the endpoint :param session: AIOHTTP client session instance :param proxy: Proxy url :return:
[ "Return", "connection", "handler", "instance", "for", "the", "endpoint" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L270-L283
duniter/duniter-python-api
duniterpy/api/endpoint.py
WS2PEndpoint.from_inline
def from_inline(cls: Type[WS2PEndpointType], inline: str) -> WS2PEndpointType: """ Return WS2PEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = WS2PEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(WS2PEndpoint.API) ws2pid = m.group(1) server = m.group(2) port = int(m.group(3)) path = m.group(4) if not path: path = "" return cls(ws2pid, server, port, path)
python
def from_inline(cls: Type[WS2PEndpointType], inline: str) -> WS2PEndpointType: """ Return WS2PEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = WS2PEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(WS2PEndpoint.API) ws2pid = m.group(1) server = m.group(2) port = int(m.group(3)) path = m.group(4) if not path: path = "" return cls(ws2pid, server, port, path)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "WS2PEndpointType", "]", ",", "inline", ":", "str", ")", "->", "WS2PEndpointType", ":", "m", "=", "WS2PEndpoint", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "m", "is", "None", ":", "rai...
Return WS2PEndpoint instance from endpoint string :param inline: Endpoint string :return:
[ "Return", "WS2PEndpoint", "instance", "from", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L307-L323
duniter/duniter-python-api
duniterpy/api/endpoint.py
WS2PEndpoint.inline
def inline(self) -> str: """ Return endpoint string :return: """ inlined = [str(info) for info in (self.ws2pid, self.server, self.port, self.path) if info] return WS2PEndpoint.API + " " + " ".join(inlined)
python
def inline(self) -> str: """ Return endpoint string :return: """ inlined = [str(info) for info in (self.ws2pid, self.server, self.port, self.path) if info] return WS2PEndpoint.API + " " + " ".join(inlined)
[ "def", "inline", "(", "self", ")", "->", "str", ":", "inlined", "=", "[", "str", "(", "info", ")", "for", "info", "in", "(", "self", ".", "ws2pid", ",", "self", ".", "server", ",", "self", ".", "port", ",", "self", ".", "path", ")", "if", "info...
Return endpoint string :return:
[ "Return", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L325-L332
duniter/duniter-python-api
duniterpy/api/endpoint.py
ESCoreEndpoint.from_inline
def from_inline(cls: Type[ESCoreEndpointType], inline: str) -> ESCoreEndpointType: """ Return ESCoreEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = ESCoreEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(ESCoreEndpoint.API) server = m.group(1) port = int(m.group(2)) return cls(server, port)
python
def from_inline(cls: Type[ESCoreEndpointType], inline: str) -> ESCoreEndpointType: """ Return ESCoreEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = ESCoreEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(ESCoreEndpoint.API) server = m.group(1) port = int(m.group(2)) return cls(server, port)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "ESCoreEndpointType", "]", ",", "inline", ":", "str", ")", "->", "ESCoreEndpointType", ":", "m", "=", "ESCoreEndpoint", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "m", "is", "None", ":", ...
Return ESCoreEndpoint instance from endpoint string :param inline: Endpoint string :return:
[ "Return", "ESCoreEndpoint", "instance", "from", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L372-L384
duniter/duniter-python-api
duniterpy/api/endpoint.py
ESCoreEndpoint.conn_handler
def conn_handler(self, session: ClientSession, proxy: str = None) -> ConnectionHandler: """ Return connection handler instance for the endpoint :param session: AIOHTTP client session instance :param proxy: Proxy url :return: """ return ConnectionHandler("https", "wss", self.server, self.port, "", session, proxy)
python
def conn_handler(self, session: ClientSession, proxy: str = None) -> ConnectionHandler: """ Return connection handler instance for the endpoint :param session: AIOHTTP client session instance :param proxy: Proxy url :return: """ return ConnectionHandler("https", "wss", self.server, self.port, "", session, proxy)
[ "def", "conn_handler", "(", "self", ",", "session", ":", "ClientSession", ",", "proxy", ":", "str", "=", "None", ")", "->", "ConnectionHandler", ":", "return", "ConnectionHandler", "(", "\"https\"", ",", "\"wss\"", ",", "self", ".", "server", ",", "self", ...
Return connection handler instance for the endpoint :param session: AIOHTTP client session instance :param proxy: Proxy url :return:
[ "Return", "connection", "handler", "instance", "for", "the", "endpoint" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L395-L403
duniter/duniter-python-api
duniterpy/api/endpoint.py
ESUserEndpoint.from_inline
def from_inline(cls: Type[ESUserEndpointType], inline: str) -> ESUserEndpointType: """ Return ESUserEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = ESUserEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(ESUserEndpoint.API) server = m.group(1) port = int(m.group(2)) return cls(server, port)
python
def from_inline(cls: Type[ESUserEndpointType], inline: str) -> ESUserEndpointType: """ Return ESUserEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = ESUserEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(ESUserEndpoint.API) server = m.group(1) port = int(m.group(2)) return cls(server, port)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "ESUserEndpointType", "]", ",", "inline", ":", "str", ")", "->", "ESUserEndpointType", ":", "m", "=", "ESUserEndpoint", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "m", "is", "None", ":", ...
Return ESUserEndpoint instance from endpoint string :param inline: Endpoint string :return:
[ "Return", "ESUserEndpoint", "instance", "from", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L432-L444
duniter/duniter-python-api
duniterpy/api/endpoint.py
ESSubscribtionEndpoint.from_inline
def from_inline(cls: Type[ESSubscribtionEndpointType], inline: str) -> ESSubscribtionEndpointType: """ Return ESSubscribtionEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = ESSubscribtionEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(ESSubscribtionEndpoint.API) server = m.group(1) port = int(m.group(2)) return cls(server, port)
python
def from_inline(cls: Type[ESSubscribtionEndpointType], inline: str) -> ESSubscribtionEndpointType: """ Return ESSubscribtionEndpoint instance from endpoint string :param inline: Endpoint string :return: """ m = ESSubscribtionEndpoint.re_inline.match(inline) if m is None: raise MalformedDocumentError(ESSubscribtionEndpoint.API) server = m.group(1) port = int(m.group(2)) return cls(server, port)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "ESSubscribtionEndpointType", "]", ",", "inline", ":", "str", ")", "->", "ESSubscribtionEndpointType", ":", "m", "=", "ESSubscribtionEndpoint", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "m", ...
Return ESSubscribtionEndpoint instance from endpoint string :param inline: Endpoint string :return:
[ "Return", "ESSubscribtionEndpoint", "instance", "from", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L492-L504
duniter/duniter-python-api
duniterpy/api/endpoint.py
ESSubscribtionEndpoint.inline
def inline(self) -> str: """ Return endpoint string :return: """ inlined = [str(info) for info in (self.server, self.port) if info] return ESSubscribtionEndpoint.API + " " + " ".join(inlined)
python
def inline(self) -> str: """ Return endpoint string :return: """ inlined = [str(info) for info in (self.server, self.port) if info] return ESSubscribtionEndpoint.API + " " + " ".join(inlined)
[ "def", "inline", "(", "self", ")", "->", "str", ":", "inlined", "=", "[", "str", "(", "info", ")", "for", "info", "in", "(", "self", ".", "server", ",", "self", ".", "port", ")", "if", "info", "]", "return", "ESSubscribtionEndpoint", ".", "API", "+...
Return endpoint string :return:
[ "Return", "endpoint", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L506-L513
twneale/visitors
visitors/base.py
Visitor.continues
def continues(method): '''Method decorator signifying that the visitor should not visit the current node's children once this method has been invoked. ''' @functools.wraps(method) def wrapped(self, *args, **kwargs): yield method(self, *args, **kwargs) raise self.Continue() return wrapped
python
def continues(method): '''Method decorator signifying that the visitor should not visit the current node's children once this method has been invoked. ''' @functools.wraps(method) def wrapped(self, *args, **kwargs): yield method(self, *args, **kwargs) raise self.Continue() return wrapped
[ "def", "continues", "(", "method", ")", ":", "@", "functools", ".", "wraps", "(", "method", ")", "def", "wrapped", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yield", "method", "(", "self", ",", "*", "args", ",", "*", "*", ...
Method decorator signifying that the visitor should not visit the current node's children once this method has been invoked.
[ "Method", "decorator", "signifying", "that", "the", "visitor", "should", "not", "visit", "the", "current", "node", "s", "children", "once", "this", "method", "has", "been", "invoked", "." ]
train
https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/base.py#L38-L46
twneale/visitors
visitors/base.py
Visitor.get_methodnames
def get_methodnames(self, node): '''Given a node, generate all names for matching visitor methods. ''' nodekey = self.get_nodekey(node) prefix = self._method_prefix if isinstance(nodekey, self.GeneratorType): for nodekey in nodekey: yield self._method_prefix + nodekey else: yield self._method_prefix + nodekey
python
def get_methodnames(self, node): '''Given a node, generate all names for matching visitor methods. ''' nodekey = self.get_nodekey(node) prefix = self._method_prefix if isinstance(nodekey, self.GeneratorType): for nodekey in nodekey: yield self._method_prefix + nodekey else: yield self._method_prefix + nodekey
[ "def", "get_methodnames", "(", "self", ",", "node", ")", ":", "nodekey", "=", "self", ".", "get_nodekey", "(", "node", ")", "prefix", "=", "self", ".", "_method_prefix", "if", "isinstance", "(", "nodekey", ",", "self", ".", "GeneratorType", ")", ":", "fo...
Given a node, generate all names for matching visitor methods.
[ "Given", "a", "node", "generate", "all", "names", "for", "matching", "visitor", "methods", "." ]
train
https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/base.py#L87-L96
twneale/visitors
visitors/base.py
Visitor.get_method
def get_method(self, node): '''Given a particular node, check the visitor instance for methods mathing the computed methodnames (the function is a generator). Note that methods are cached at the class level. ''' methods = self._methods for methodname in self.get_methodnames(node): if methodname in methods: return methods[methodname] else: cls = self.__class__ method = getattr(cls, methodname, None) if method is not None: methods[methodname] = method return method
python
def get_method(self, node): '''Given a particular node, check the visitor instance for methods mathing the computed methodnames (the function is a generator). Note that methods are cached at the class level. ''' methods = self._methods for methodname in self.get_methodnames(node): if methodname in methods: return methods[methodname] else: cls = self.__class__ method = getattr(cls, methodname, None) if method is not None: methods[methodname] = method return method
[ "def", "get_method", "(", "self", ",", "node", ")", ":", "methods", "=", "self", ".", "_methods", "for", "methodname", "in", "self", ".", "get_methodnames", "(", "node", ")", ":", "if", "methodname", "in", "methods", ":", "return", "methods", "[", "metho...
Given a particular node, check the visitor instance for methods mathing the computed methodnames (the function is a generator). Note that methods are cached at the class level.
[ "Given", "a", "particular", "node", "check", "the", "visitor", "instance", "for", "methods", "mathing", "the", "computed", "methodnames", "(", "the", "function", "is", "a", "generator", ")", "." ]
train
https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/base.py#L98-L113
twneale/visitors
visitors/base.py
Visitor.visit
def visit(self, node): '''The main visit function. Visits the passed-in node and calls finalize. ''' for token in self.itervisit(node): pass result = self.finalize() if result is not self: return result
python
def visit(self, node): '''The main visit function. Visits the passed-in node and calls finalize. ''' for token in self.itervisit(node): pass result = self.finalize() if result is not self: return result
[ "def", "visit", "(", "self", ",", "node", ")", ":", "for", "token", "in", "self", ".", "itervisit", "(", "node", ")", ":", "pass", "result", "=", "self", ".", "finalize", "(", ")", "if", "result", "is", "not", "self", ":", "return", "result" ]
The main visit function. Visits the passed-in node and calls finalize.
[ "The", "main", "visit", "function", ".", "Visits", "the", "passed", "-", "in", "node", "and", "calls", "finalize", "." ]
train
https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/base.py#L129-L137
twneale/visitors
visitors/base.py
Visitor.itervisit_node
def itervisit_node(self, node): '''Given a node, find the matching visitor function (if any) and run it. If the result is a context manager, yield from all the nodes children before allowing it to exit. Otherwise, return the result. ''' # Get the corresponding method and run it. func = self.get_method(node) if func is None: generic_visit = getattr(self, 'generic_visit', None) if generic_visit is not None: result = generic_visit(node) else: # There is no handler defined for this node. return else: result = self.apply_visitor_method(func, node) # If result is a generator, yield from it. if isinstance(result, self.GeneratorType): yield from result # If result is a context manager, enter, visit children, then exit. elif isinstance(result, self.GeneratorContextManager): with result: itervisit_nodes = self.itervisit_nodes for child in self.get_children(node): try: yield from itervisit_nodes(child) except self.Continue: continue # Otherwise just yield the result. else: yield result
python
def itervisit_node(self, node): '''Given a node, find the matching visitor function (if any) and run it. If the result is a context manager, yield from all the nodes children before allowing it to exit. Otherwise, return the result. ''' # Get the corresponding method and run it. func = self.get_method(node) if func is None: generic_visit = getattr(self, 'generic_visit', None) if generic_visit is not None: result = generic_visit(node) else: # There is no handler defined for this node. return else: result = self.apply_visitor_method(func, node) # If result is a generator, yield from it. if isinstance(result, self.GeneratorType): yield from result # If result is a context manager, enter, visit children, then exit. elif isinstance(result, self.GeneratorContextManager): with result: itervisit_nodes = self.itervisit_nodes for child in self.get_children(node): try: yield from itervisit_nodes(child) except self.Continue: continue # Otherwise just yield the result. else: yield result
[ "def", "itervisit_node", "(", "self", ",", "node", ")", ":", "# Get the corresponding method and run it.", "func", "=", "self", ".", "get_method", "(", "node", ")", "if", "func", "is", "None", ":", "generic_visit", "=", "getattr", "(", "self", ",", "'generic_v...
Given a node, find the matching visitor function (if any) and run it. If the result is a context manager, yield from all the nodes children before allowing it to exit. Otherwise, return the result.
[ "Given", "a", "node", "find", "the", "matching", "visitor", "function", "(", "if", "any", ")", "and", "run", "it", ".", "If", "the", "result", "is", "a", "context", "manager", "yield", "from", "all", "the", "nodes", "children", "before", "allowing", "it"...
train
https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/base.py#L154-L187
ttm/socialLegacy
social/fb/read.py
readGDF
def readGDF(filename="../data/RenatoFabbri06022014.gdf"): """Made to work with gdf files from my own network and friends and groups""" with open(filename,"r") as f: data=f.read() lines=data.split("\n") columns=lines[0].split(">")[1].split(",") column_names=[i.split(" ")[0] for i in columns] data_friends={cn:[] for cn in column_names} for line in lines[1:]: if not line: break if ">" in line: columns=line.split(">")[1].split(",") column_names2=[i.split(" ")[0] for i in columns] data_friendships={cn:[] for cn in column_names2} continue fields=line.split(",") if "column_names2" not in locals(): for i, field in enumerate(fields): if column_names[i] in ("name","groupid"): pass elif field.isdigit(): field=int(field) data_friends[column_names[i]].append(field) else: for i, field in enumerate(fields): if column_names2[i]=="name": pass elif field.isdigit(): field=int(field) data_friendships[column_names2[i]].append(field) return {"relations":data_friendships, "individuals":data_friends}
python
def readGDF(filename="../data/RenatoFabbri06022014.gdf"): """Made to work with gdf files from my own network and friends and groups""" with open(filename,"r") as f: data=f.read() lines=data.split("\n") columns=lines[0].split(">")[1].split(",") column_names=[i.split(" ")[0] for i in columns] data_friends={cn:[] for cn in column_names} for line in lines[1:]: if not line: break if ">" in line: columns=line.split(">")[1].split(",") column_names2=[i.split(" ")[0] for i in columns] data_friendships={cn:[] for cn in column_names2} continue fields=line.split(",") if "column_names2" not in locals(): for i, field in enumerate(fields): if column_names[i] in ("name","groupid"): pass elif field.isdigit(): field=int(field) data_friends[column_names[i]].append(field) else: for i, field in enumerate(fields): if column_names2[i]=="name": pass elif field.isdigit(): field=int(field) data_friendships[column_names2[i]].append(field) return {"relations":data_friendships, "individuals":data_friends}
[ "def", "readGDF", "(", "filename", "=", "\"../data/RenatoFabbri06022014.gdf\"", ")", ":", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "lines", "=", "data", ".", "split", "(", "\"\\n\"", "...
Made to work with gdf files from my own network and friends and groups
[ "Made", "to", "work", "with", "gdf", "files", "from", "my", "own", "network", "and", "friends", "and", "groups" ]
train
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/read.py#L164-L192
yprez/django-useful
useful/helpers/json_response.py
json_response
def json_response(data, status=200, serializer=None): """ Returns an HttpResponse object containing JSON serialized data. The mime-type is set to application/json, and the charset to UTF-8. """ return HttpResponse(json.dumps(data, default=serializer), status=status, content_type='application/json; charset=UTF-8')
python
def json_response(data, status=200, serializer=None): """ Returns an HttpResponse object containing JSON serialized data. The mime-type is set to application/json, and the charset to UTF-8. """ return HttpResponse(json.dumps(data, default=serializer), status=status, content_type='application/json; charset=UTF-8')
[ "def", "json_response", "(", "data", ",", "status", "=", "200", ",", "serializer", "=", "None", ")", ":", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "data", ",", "default", "=", "serializer", ")", ",", "status", "=", "status", ",", "cont...
Returns an HttpResponse object containing JSON serialized data. The mime-type is set to application/json, and the charset to UTF-8.
[ "Returns", "an", "HttpResponse", "object", "containing", "JSON", "serialized", "data", "." ]
train
https://github.com/yprez/django-useful/blob/288aa46df6f40fb0323c3d0c0efcded887472538/useful/helpers/json_response.py#L9-L17
yprez/django-useful
useful/helpers/json_response.py
jsonp_response
def jsonp_response(data, callback="f", status=200, serializer=None): """ Returns an HttpResponse object containing JSON serialized data, wrapped in a JSONP callback. The mime-type is set to application/x-javascript, and the charset to UTF-8. """ val = json.dumps(data, default=serializer) ret = "{callback}('{val}');".format(callback=callback, val=val) return HttpResponse(ret, status=status, content_type='application/x-javascript; charset=UTF-8')
python
def jsonp_response(data, callback="f", status=200, serializer=None): """ Returns an HttpResponse object containing JSON serialized data, wrapped in a JSONP callback. The mime-type is set to application/x-javascript, and the charset to UTF-8. """ val = json.dumps(data, default=serializer) ret = "{callback}('{val}');".format(callback=callback, val=val) return HttpResponse(ret, status=status, content_type='application/x-javascript; charset=UTF-8')
[ "def", "jsonp_response", "(", "data", ",", "callback", "=", "\"f\"", ",", "status", "=", "200", ",", "serializer", "=", "None", ")", ":", "val", "=", "json", ".", "dumps", "(", "data", ",", "default", "=", "serializer", ")", "ret", "=", "\"{callback}('...
Returns an HttpResponse object containing JSON serialized data, wrapped in a JSONP callback. The mime-type is set to application/x-javascript, and the charset to UTF-8.
[ "Returns", "an", "HttpResponse", "object", "containing", "JSON", "serialized", "data", "wrapped", "in", "a", "JSONP", "callback", "." ]
train
https://github.com/yprez/django-useful/blob/288aa46df6f40fb0323c3d0c0efcded887472538/useful/helpers/json_response.py#L20-L32
salesking/salesking_python_sdk
salesking/collection.py
get_collection_instance
def get_collection_instance(klass, api_client = None, request_api=True, **kwargs): """ instatiates the collection lookup of json type klass :param klass: json file name :param api_client: transportation api :param request_api: if True uses the default APIClient """ _type = klass if api_client is None and request_api: api_client = api.APIClient() if isinstance(klass, dict): _type = klass['type'] obj = CollectionResource(_type, api_client, **kwargs) return obj # # /** # * magic method for mapping all kinds of method calls to addFilter # * @param string $method method name # * @param array $args array of arguments # * @return SaleskingCollection # * @throws BadMethodCallException # * @since 1.0.0 # */ # public function __call($method, array $args) { # try { # $this->addFilter($method,$args[0]); # return $this; # } # catch (SaleskingException $e) # { # if($e->getCode() == "FILTER_NOTEXISTING") # { # throw new BadMethodCallException('Call to undefined method :'.$method); # } # # throw $e; # } # } def sort(self, direction = "ASC"): """ set the sort to the query ['ASC','DESC'] """ direction = directtion.upper() if direction in ['ASC','DESC']: self.sort = direction else: raise SaleskingException("SORT_INVALIDDIRECTION","Invalid sorting direction - please choose either ASC or DESC"); def sort_by(self, property): """ set sort by property to the query """ seek =u"sort_by" # make sure that the api supports sorting for this kind of object if seek in self.schema['links']['instances']['properties']: # make sure that we have a valid property if seek in self.schema['links']['instances']['properties']['sort_by']['enum']: self.sort_by = property return self else: raise SaleskingException("SORTBY_INVALIDPROPERTY","Invalid property for sorting"); else: raise SaleskingException("SORTBY_CANNOTSORT","object type doesnt support sorting");
python
def get_collection_instance(klass, api_client = None, request_api=True, **kwargs): """ instatiates the collection lookup of json type klass :param klass: json file name :param api_client: transportation api :param request_api: if True uses the default APIClient """ _type = klass if api_client is None and request_api: api_client = api.APIClient() if isinstance(klass, dict): _type = klass['type'] obj = CollectionResource(_type, api_client, **kwargs) return obj # # /** # * magic method for mapping all kinds of method calls to addFilter # * @param string $method method name # * @param array $args array of arguments # * @return SaleskingCollection # * @throws BadMethodCallException # * @since 1.0.0 # */ # public function __call($method, array $args) { # try { # $this->addFilter($method,$args[0]); # return $this; # } # catch (SaleskingException $e) # { # if($e->getCode() == "FILTER_NOTEXISTING") # { # throw new BadMethodCallException('Call to undefined method :'.$method); # } # # throw $e; # } # } def sort(self, direction = "ASC"): """ set the sort to the query ['ASC','DESC'] """ direction = directtion.upper() if direction in ['ASC','DESC']: self.sort = direction else: raise SaleskingException("SORT_INVALIDDIRECTION","Invalid sorting direction - please choose either ASC or DESC"); def sort_by(self, property): """ set sort by property to the query """ seek =u"sort_by" # make sure that the api supports sorting for this kind of object if seek in self.schema['links']['instances']['properties']: # make sure that we have a valid property if seek in self.schema['links']['instances']['properties']['sort_by']['enum']: self.sort_by = property return self else: raise SaleskingException("SORTBY_INVALIDPROPERTY","Invalid property for sorting"); else: raise SaleskingException("SORTBY_CANNOTSORT","object type doesnt support sorting");
[ "def", "get_collection_instance", "(", "klass", ",", "api_client", "=", "None", ",", "request_api", "=", "True", ",", "*", "*", "kwargs", ")", ":", "_type", "=", "klass", "if", "api_client", "is", "None", "and", "request_api", ":", "api_client", "=", "api"...
instatiates the collection lookup of json type klass :param klass: json file name :param api_client: transportation api :param request_api: if True uses the default APIClient
[ "instatiates", "the", "collection", "lookup", "of", "json", "type", "klass", ":", "param", "klass", ":", "json", "file", "name", ":", "param", "api_client", ":", "transportation", "api", ":", "param", "request_api", ":", "if", "True", "uses", "the", "default...
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L342-L407
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin.set_per_page
def set_per_page(self, entries=100): """ set entries per page max 200 """ if isinstance(entries, int) and entries <= 200: self.per_page = int(entries) return self else: raise SalesKingException("PERPAGE_ONLYINT", "Please set an integer <200 for the per-page limit");
python
def set_per_page(self, entries=100): """ set entries per page max 200 """ if isinstance(entries, int) and entries <= 200: self.per_page = int(entries) return self else: raise SalesKingException("PERPAGE_ONLYINT", "Please set an integer <200 for the per-page limit");
[ "def", "set_per_page", "(", "self", ",", "entries", "=", "100", ")", ":", "if", "isinstance", "(", "entries", ",", "int", ")", "and", "entries", "<=", "200", ":", "self", ".", "per_page", "=", "int", "(", "entries", ")", "return", "self", "else", ":"...
set entries per page max 200
[ "set", "entries", "per", "page", "max", "200" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L68-L76
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin.set_resource_type
def set_resource_type(self, klass): """ set type to load and load schema """ self.resource_type = klass self.schema = loaders.load_schema_raw(self.resource_type)
python
def set_resource_type(self, klass): """ set type to load and load schema """ self.resource_type = klass self.schema = loaders.load_schema_raw(self.resource_type)
[ "def", "set_resource_type", "(", "self", ",", "klass", ")", ":", "self", ".", "resource_type", "=", "klass", "self", ".", "schema", "=", "loaders", ".", "load_schema_raw", "(", "self", ".", "resource_type", ")" ]
set type to load and load schema
[ "set", "type", "to", "load", "and", "load", "schema" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L121-L126
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin.set_filters
def set_filters(self, filters): """ set and validate filters dict """ if not isinstance(filters, dict): raise Exception("filters must be a dict") self.filters = {} for key in filters.keys(): value = filters[key] self.add_filter(key,value)
python
def set_filters(self, filters): """ set and validate filters dict """ if not isinstance(filters, dict): raise Exception("filters must be a dict") self.filters = {} for key in filters.keys(): value = filters[key] self.add_filter(key,value)
[ "def", "set_filters", "(", "self", ",", "filters", ")", ":", "if", "not", "isinstance", "(", "filters", ",", "dict", ")", ":", "raise", "Exception", "(", "\"filters must be a dict\"", ")", "self", ".", "filters", "=", "{", "}", "for", "key", "in", "filte...
set and validate filters dict
[ "set", "and", "validate", "filters", "dict" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L128-L137
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin.add_filter
def add_filter(self, key, filter_value): """ add and validate a filter with value returns True on success otherwise exception """ seek = u"filter[%s]" % key if self.validate_filter(key, filter_value): self.filters[key] = filter_value return True else: msg = u'Invalid filter value: filter:%s value:%s' % (key, filter_value) print msg raise SalesKingException("FILTER_INVALID", msg )
python
def add_filter(self, key, filter_value): """ add and validate a filter with value returns True on success otherwise exception """ seek = u"filter[%s]" % key if self.validate_filter(key, filter_value): self.filters[key] = filter_value return True else: msg = u'Invalid filter value: filter:%s value:%s' % (key, filter_value) print msg raise SalesKingException("FILTER_INVALID", msg )
[ "def", "add_filter", "(", "self", ",", "key", ",", "filter_value", ")", ":", "seek", "=", "u\"filter[%s]\"", "%", "key", "if", "self", ".", "validate_filter", "(", "key", ",", "filter_value", ")", ":", "self", ".", "filters", "[", "key", "]", "=", "fil...
add and validate a filter with value returns True on success otherwise exception
[ "add", "and", "validate", "a", "filter", "with", "value", "returns", "True", "on", "success", "otherwise", "exception" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L139-L151
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin._is_type
def _is_type(self, instance, type): """ Check if an ``instance`` is of the provided (JSON Schema) ``type``. """ if type not in self._types: raise UnknownType(type) type = self._types[type] # bool inherits from int, so ensure bools aren't reported as integers if isinstance(instance, bool): type = _flatten(type) if int in type and bool not in type: return False return isinstance(instance, type)
python
def _is_type(self, instance, type): """ Check if an ``instance`` is of the provided (JSON Schema) ``type``. """ if type not in self._types: raise UnknownType(type) type = self._types[type] # bool inherits from int, so ensure bools aren't reported as integers if isinstance(instance, bool): type = _flatten(type) if int in type and bool not in type: return False return isinstance(instance, type)
[ "def", "_is_type", "(", "self", ",", "instance", ",", "type", ")", ":", "if", "type", "not", "in", "self", ".", "_types", ":", "raise", "UnknownType", "(", "type", ")", "type", "=", "self", ".", "_types", "[", "type", "]", "# bool inherits from int, so e...
Check if an ``instance`` is of the provided (JSON Schema) ``type``.
[ "Check", "if", "an", "instance", "is", "of", "the", "provided", "(", "JSON", "Schema", ")", "type", "." ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L153-L166
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin.validate_filter
def validate_filter(self, key, filter_value): """ validate the filter key and value against the collection schema :param key: property name :param filter_value: value of the filter :returns True if all is ok otherwise False """ ok = False seek = u"filter[%s]" % key value = None for link in self.schema['links']: if link['rel'] == 'instances': for property in link['properties']: if seek == property: value = link['properties'][property] ok = True if not ok: return False ok = self._is_type(filter_value, value['type']) # if string with type add validation if ok is True and value['type'] == 'string' and 'format' in value.keys(): ok = self._validate_json_format(filter_value, value) return ok
python
def validate_filter(self, key, filter_value): """ validate the filter key and value against the collection schema :param key: property name :param filter_value: value of the filter :returns True if all is ok otherwise False """ ok = False seek = u"filter[%s]" % key value = None for link in self.schema['links']: if link['rel'] == 'instances': for property in link['properties']: if seek == property: value = link['properties'][property] ok = True if not ok: return False ok = self._is_type(filter_value, value['type']) # if string with type add validation if ok is True and value['type'] == 'string' and 'format' in value.keys(): ok = self._validate_json_format(filter_value, value) return ok
[ "def", "validate_filter", "(", "self", ",", "key", ",", "filter_value", ")", ":", "ok", "=", "False", "seek", "=", "u\"filter[%s]\"", "%", "key", "value", "=", "None", "for", "link", "in", "self", ".", "schema", "[", "'links'", "]", ":", "if", "link", ...
validate the filter key and value against the collection schema :param key: property name :param filter_value: value of the filter :returns True if all is ok otherwise False
[ "validate", "the", "filter", "key", "and", "value", "against", "the", "collection", "schema", ":", "param", "key", ":", "property", "name", ":", "param", "filter_value", ":", "value", "of", "the", "filter", ":", "returns", "True", "if", "all", "is", "ok", ...
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L169-L193
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin._validate_json_format
def _validate_json_format(self, filter_value, schema_validation_type): """ adds the type:string format:schema_validation_type :param filter_value: value of the filter :param schema_validation_type: format description of the json schema entry """ ok = False try: validators.json_schema_validation_format(filter_value, schema_validation_type) ok = True except ValueError as e: pass return ok
python
def _validate_json_format(self, filter_value, schema_validation_type): """ adds the type:string format:schema_validation_type :param filter_value: value of the filter :param schema_validation_type: format description of the json schema entry """ ok = False try: validators.json_schema_validation_format(filter_value, schema_validation_type) ok = True except ValueError as e: pass return ok
[ "def", "_validate_json_format", "(", "self", ",", "filter_value", ",", "schema_validation_type", ")", ":", "ok", "=", "False", "try", ":", "validators", ".", "json_schema_validation_format", "(", "filter_value", ",", "schema_validation_type", ")", "ok", "=", "True",...
adds the type:string format:schema_validation_type :param filter_value: value of the filter :param schema_validation_type: format description of the json schema entry
[ "adds", "the", "type", ":", "string", "format", ":", "schema_validation_type", ":", "param", "filter_value", ":", "value", "of", "the", "filter", ":", "param", "schema_validation_type", ":", "format", "description", "of", "the", "json", "schema", "entry" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L195-L207
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin._build_query_url
def _build_query_url(self, page = None, verbose = False): """ builds the url to call """ query = [] # # build the filters # for afilter in self.filters.keys(): # value = self.filters[afilter] # print"filter:%s value:%s" % (afilter,value) # value = urlencode(value) # query_str = u"%s=%s" % (afilter, value) if len(self.filters) > 0: query.append(urlencode(self.filters)) if self.sort: query_str = u"%s=%s" % (u"sort", self.sort) query.append(query_str) if self.sort_by: query_str = u"%s=%s" % (u"sort_by", self.sort_by) query.append(query_str) if self.per_page: query_str = u"%s=%s" % (u"per_page", self.per_page) query.append(query_str) if page: query_str = u"%s=%s" % (u"page", page) query.append(query_str) query = u"?%s" % (u"&".join(query)) url = u"%s%s" % (self.get_list_endpoint()['href'],query) url = u"%s%s%s" % (self.__api__.base_url, API_BASE_PATH, url) msg = "_build_query_url: url:%s" % url log.debug(msg) if verbose: print msg return url
python
def _build_query_url(self, page = None, verbose = False): """ builds the url to call """ query = [] # # build the filters # for afilter in self.filters.keys(): # value = self.filters[afilter] # print"filter:%s value:%s" % (afilter,value) # value = urlencode(value) # query_str = u"%s=%s" % (afilter, value) if len(self.filters) > 0: query.append(urlencode(self.filters)) if self.sort: query_str = u"%s=%s" % (u"sort", self.sort) query.append(query_str) if self.sort_by: query_str = u"%s=%s" % (u"sort_by", self.sort_by) query.append(query_str) if self.per_page: query_str = u"%s=%s" % (u"per_page", self.per_page) query.append(query_str) if page: query_str = u"%s=%s" % (u"page", page) query.append(query_str) query = u"?%s" % (u"&".join(query)) url = u"%s%s" % (self.get_list_endpoint()['href'],query) url = u"%s%s%s" % (self.__api__.base_url, API_BASE_PATH, url) msg = "_build_query_url: url:%s" % url log.debug(msg) if verbose: print msg return url
[ "def", "_build_query_url", "(", "self", ",", "page", "=", "None", ",", "verbose", "=", "False", ")", ":", "query", "=", "[", "]", "# # build the filters", "# for afilter in self.filters.keys():", "# value = self.filters[afilter]", "# prin...
builds the url to call
[ "builds", "the", "url", "to", "call" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L212-L244
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin.get_list_endpoint
def get_list_endpoint(self, rel=u"instances"): """ get the configured list entpoint for the schema.type :param rel: lookup rel: value inside the links section :returns the value :raises APIException """ schema_loaded = not self.schema is None links_present = "links" in self.schema.keys() if (schema_loaded and links_present): for row in self.schema['links']: if row['rel'] == rel: #print "row %s" % row return row raise APIException("ENDPOINT_NOTFOUND","invalid endpoint")
python
def get_list_endpoint(self, rel=u"instances"): """ get the configured list entpoint for the schema.type :param rel: lookup rel: value inside the links section :returns the value :raises APIException """ schema_loaded = not self.schema is None links_present = "links" in self.schema.keys() if (schema_loaded and links_present): for row in self.schema['links']: if row['rel'] == rel: #print "row %s" % row return row raise APIException("ENDPOINT_NOTFOUND","invalid endpoint")
[ "def", "get_list_endpoint", "(", "self", ",", "rel", "=", "u\"instances\"", ")", ":", "schema_loaded", "=", "not", "self", ".", "schema", "is", "None", "links_present", "=", "\"links\"", "in", "self", ".", "schema", ".", "keys", "(", ")", "if", "(", "sch...
get the configured list entpoint for the schema.type :param rel: lookup rel: value inside the links section :returns the value :raises APIException
[ "get", "the", "configured", "list", "entpoint", "for", "the", "schema", ".", "type", ":", "param", "rel", ":", "lookup", "rel", ":", "value", "inside", "the", "links", "section", ":", "returns", "the", "value", ":", "raises", "APIException" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L246-L260
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin._post_load
def _post_load(self, response, verbose): """ post load processing fills the self._items collection """ try: if verbose: print response.content log.debug(response.content) except Exception, e: raise e if response is not None and response.status_code == 200: types = helpers.pluralize(self.resource_type) #print "types %s" % types body = json.loads(response.content, encoding='utf-8') self.total_entries = body['collection']['total_entries'] self.total_pages = body['collection']['total_pages'] self.current_page = body['collection']['current_page'] ## now get the items from the class factory if self.total_entries != 0: for response_item in body[types]: obj = self._response_item_to_object(response_item) ## add the items self._items.append(obj) else: msg = u"Fetching failed, an error happend" raise SalesKingException("LOAD_ERROR", msg, response) return self
python
def _post_load(self, response, verbose): """ post load processing fills the self._items collection """ try: if verbose: print response.content log.debug(response.content) except Exception, e: raise e if response is not None and response.status_code == 200: types = helpers.pluralize(self.resource_type) #print "types %s" % types body = json.loads(response.content, encoding='utf-8') self.total_entries = body['collection']['total_entries'] self.total_pages = body['collection']['total_pages'] self.current_page = body['collection']['current_page'] ## now get the items from the class factory if self.total_entries != 0: for response_item in body[types]: obj = self._response_item_to_object(response_item) ## add the items self._items.append(obj) else: msg = u"Fetching failed, an error happend" raise SalesKingException("LOAD_ERROR", msg, response) return self
[ "def", "_post_load", "(", "self", ",", "response", ",", "verbose", ")", ":", "try", ":", "if", "verbose", ":", "print", "response", ".", "content", "log", ".", "debug", "(", "response", ".", "content", ")", "except", "Exception", ",", "e", ":", "raise"...
post load processing fills the self._items collection
[ "post", "load", "processing", "fills", "the", "self", ".", "_items", "collection" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L265-L295
salesking/salesking_python_sdk
salesking/collection.py
CollectionAttributesMixin._response_item_to_object
def _response_item_to_object(self, resp_item): """ take json and make a resource out of it """ item_cls = resources.get_model_class(self.resource_type) properties_dict = resp_item[self.resource_type] new_dict = helpers.remove_properties_containing_None(properties_dict) # raises exception if something goes wrong obj = item_cls(new_dict) return obj
python
def _response_item_to_object(self, resp_item): """ take json and make a resource out of it """ item_cls = resources.get_model_class(self.resource_type) properties_dict = resp_item[self.resource_type] new_dict = helpers.remove_properties_containing_None(properties_dict) # raises exception if something goes wrong obj = item_cls(new_dict) return obj
[ "def", "_response_item_to_object", "(", "self", ",", "resp_item", ")", ":", "item_cls", "=", "resources", ".", "get_model_class", "(", "self", ".", "resource_type", ")", "properties_dict", "=", "resp_item", "[", "self", ".", "resource_type", "]", "new_dict", "="...
take json and make a resource out of it
[ "take", "json", "and", "make", "a", "resource", "out", "of", "it" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L297-L306
salesking/salesking_python_sdk
salesking/collection.py
CollectionResource.load
def load(self, page = None, verbose=False): """ call to execute the collection loading :param page: integer of the page to load :param verbose: boolean to print to console :returns response :raises the SalesKingException """ url = self._build_query_url(page, verbose) response = self._load(url, verbose) response = self._post_load(response, verbose) return response
python
def load(self, page = None, verbose=False): """ call to execute the collection loading :param page: integer of the page to load :param verbose: boolean to print to console :returns response :raises the SalesKingException """ url = self._build_query_url(page, verbose) response = self._load(url, verbose) response = self._post_load(response, verbose) return response
[ "def", "load", "(", "self", ",", "page", "=", "None", ",", "verbose", "=", "False", ")", ":", "url", "=", "self", ".", "_build_query_url", "(", "page", ",", "verbose", ")", "response", "=", "self", ".", "_load", "(", "url", ",", "verbose", ")", "re...
call to execute the collection loading :param page: integer of the page to load :param verbose: boolean to print to console :returns response :raises the SalesKingException
[ "call", "to", "execute", "the", "collection", "loading", ":", "param", "page", ":", "integer", "of", "the", "page", "to", "load", ":", "param", "verbose", ":", "boolean", "to", "print", "to", "console", ":", "returns", "response", ":", "raises", "the", "...
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L313-L324
salesking/salesking_python_sdk
salesking/collection.py
CollectionResource._load
def _load(self, url, verbose): """ Execute a request against the Salesking API to fetch the items :param url: url to fetch :return response :raises SaleskingException with the corresponding http errors """ msg = u"_load url: %s" % url self._last_query_str = url log.debug(msg) if verbose: print msg response = self.__api__.request(url) return response
python
def _load(self, url, verbose): """ Execute a request against the Salesking API to fetch the items :param url: url to fetch :return response :raises SaleskingException with the corresponding http errors """ msg = u"_load url: %s" % url self._last_query_str = url log.debug(msg) if verbose: print msg response = self.__api__.request(url) return response
[ "def", "_load", "(", "self", ",", "url", ",", "verbose", ")", ":", "msg", "=", "u\"_load url: %s\"", "%", "url", "self", ".", "_last_query_str", "=", "url", "log", ".", "debug", "(", "msg", ")", "if", "verbose", ":", "print", "msg", "response", "=", ...
Execute a request against the Salesking API to fetch the items :param url: url to fetch :return response :raises SaleskingException with the corresponding http errors
[ "Execute", "a", "request", "against", "the", "Salesking", "API", "to", "fetch", "the", "items", ":", "param", "url", ":", "url", "to", "fetch", ":", "return", "response", ":", "raises", "SaleskingException", "with", "the", "corresponding", "http", "errors" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L326-L339
Synerty/peek-plugin-base
peek_plugin_base/server/PeekPlatformAdminHttpHookABC.py
PeekPlatformAdminHttpHookABC.addAdminResource
def addAdminResource(self, pluginSubPath: bytes, resource: BasicResource) -> None: """ Add Site Resource Add a cusotom implementation of a served http resource. :param pluginSubPath: The resource path where you want to serve this resource. :param resource: The resource to serve. :return: None """ pluginSubPath = pluginSubPath.strip(b'/') self.__rootAdminResource.putChild(pluginSubPath, resource)
python
def addAdminResource(self, pluginSubPath: bytes, resource: BasicResource) -> None: """ Add Site Resource Add a cusotom implementation of a served http resource. :param pluginSubPath: The resource path where you want to serve this resource. :param resource: The resource to serve. :return: None """ pluginSubPath = pluginSubPath.strip(b'/') self.__rootAdminResource.putChild(pluginSubPath, resource)
[ "def", "addAdminResource", "(", "self", ",", "pluginSubPath", ":", "bytes", ",", "resource", ":", "BasicResource", ")", "->", "None", ":", "pluginSubPath", "=", "pluginSubPath", ".", "strip", "(", "b'/'", ")", "self", ".", "__rootAdminResource", ".", "putChild...
Add Site Resource Add a cusotom implementation of a served http resource. :param pluginSubPath: The resource path where you want to serve this resource. :param resource: The resource to serve. :return: None
[ "Add", "Site", "Resource" ]
train
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/server/PeekPlatformAdminHttpHookABC.py#L31-L42
cohorte/cohorte-herald
python/snippets/herald_irc/bonus.py
BasicCommandsBot.cmd_part
def cmd_part(self, connection, sender, target, payload): """ Asks the bot to leave a channel """ if payload: connection.part(payload) else: raise ValueError("No channel given")
python
def cmd_part(self, connection, sender, target, payload): """ Asks the bot to leave a channel """ if payload: connection.part(payload) else: raise ValueError("No channel given")
[ "def", "cmd_part", "(", "self", ",", "connection", ",", "sender", ",", "target", ",", "payload", ")", ":", "if", "payload", ":", "connection", ".", "part", "(", "payload", ")", "else", ":", "raise", "ValueError", "(", "\"No channel given\"", ")" ]
Asks the bot to leave a channel
[ "Asks", "the", "bot", "to", "leave", "a", "channel" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/snippets/herald_irc/bonus.py#L10-L17
cohorte/cohorte-herald
python/snippets/herald_irc/bonus.py
BasicCommandsBot.cmd_join
def cmd_join(self, connection, sender, target, payload): """ Asks the bot to join a channel """ if payload: connection.join(payload) else: raise ValueError("No channel given")
python
def cmd_join(self, connection, sender, target, payload): """ Asks the bot to join a channel """ if payload: connection.join(payload) else: raise ValueError("No channel given")
[ "def", "cmd_join", "(", "self", ",", "connection", ",", "sender", ",", "target", ",", "payload", ")", ":", "if", "payload", ":", "connection", ".", "join", "(", "payload", ")", "else", ":", "raise", "ValueError", "(", "\"No channel given\"", ")" ]
Asks the bot to join a channel
[ "Asks", "the", "bot", "to", "join", "a", "channel" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/snippets/herald_irc/bonus.py#L20-L27
cohorte/cohorte-herald
python/snippets/herald_irc/bonus.py
BasicCommandsBot.cmd_echo
def cmd_echo(self, connection, sender, target, payload): """ Echoes the given payload """ connection.privmsg(target, payload or "Hello, {0}".format(sender))
python
def cmd_echo(self, connection, sender, target, payload): """ Echoes the given payload """ connection.privmsg(target, payload or "Hello, {0}".format(sender))
[ "def", "cmd_echo", "(", "self", ",", "connection", ",", "sender", ",", "target", ",", "payload", ")", ":", "connection", ".", "privmsg", "(", "target", ",", "payload", "or", "\"Hello, {0}\"", ".", "format", "(", "sender", ")", ")" ]
Echoes the given payload
[ "Echoes", "the", "given", "payload" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/snippets/herald_irc/bonus.py#L29-L33
cohorte/cohorte-herald
python/snippets/herald_irc/bonus.py
BasicCommandsBot.cmd_work
def cmd_work(self, connection, sender, target, payload): """ Does some job """ connection.action(target, "is doing something...") time.sleep(int(payload or "5")) connection.action(target, "has finished !") connection.privmsg(target, "My answer is: 42.")
python
def cmd_work(self, connection, sender, target, payload): """ Does some job """ connection.action(target, "is doing something...") time.sleep(int(payload or "5")) connection.action(target, "has finished !") connection.privmsg(target, "My answer is: 42.")
[ "def", "cmd_work", "(", "self", ",", "connection", ",", "sender", ",", "target", ",", "payload", ")", ":", "connection", ".", "action", "(", "target", ",", "\"is doing something...\"", ")", "time", ".", "sleep", "(", "int", "(", "payload", "or", "\"5\"", ...
Does some job
[ "Does", "some", "job" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/snippets/herald_irc/bonus.py#L43-L50
earlye/nephele
nephele/AwsLogGroup.py
AwsLogGroup.do_refresh
def do_refresh(self,args): """Refresh the view of the log group""" # prints all the groups: pprint(AwsConnectionFactory.getLogClient().describe_log_groups()) response = AwsConnectionFactory.getLogClient().describe_log_groups(logGroupNamePrefix=self.stackResource.physical_resource_id) if not 'logGroups' in response: raise Exception("Expected log group description to have logGroups entry. Got {}".format(response)) # pprint(response) descriptions = [x for x in response['logGroups'] if x['logGroupName'] == self.stackResource.physical_resource_id] if not descriptions: raise Exception("Could not find log group {} in list {}".format(self.stackResource.physical_resource_id,response['logGroups'])) self.description = descriptions[0] self.logStreams = self.loadLogStreams() print "== logStream" maxIndex = "{}".format(len(self.logStreams)+1) print "maxIndex:{}".format(maxIndex) frm = " {{0:{}d}}: {{1}}".format(len(maxIndex)) print frm index = 0 for logStream in self.logStreams: print frm.format(index,logStream['logStreamName']) index += 1
python
def do_refresh(self,args): """Refresh the view of the log group""" # prints all the groups: pprint(AwsConnectionFactory.getLogClient().describe_log_groups()) response = AwsConnectionFactory.getLogClient().describe_log_groups(logGroupNamePrefix=self.stackResource.physical_resource_id) if not 'logGroups' in response: raise Exception("Expected log group description to have logGroups entry. Got {}".format(response)) # pprint(response) descriptions = [x for x in response['logGroups'] if x['logGroupName'] == self.stackResource.physical_resource_id] if not descriptions: raise Exception("Could not find log group {} in list {}".format(self.stackResource.physical_resource_id,response['logGroups'])) self.description = descriptions[0] self.logStreams = self.loadLogStreams() print "== logStream" maxIndex = "{}".format(len(self.logStreams)+1) print "maxIndex:{}".format(maxIndex) frm = " {{0:{}d}}: {{1}}".format(len(maxIndex)) print frm index = 0 for logStream in self.logStreams: print frm.format(index,logStream['logStreamName']) index += 1
[ "def", "do_refresh", "(", "self", ",", "args", ")", ":", "# prints all the groups: pprint(AwsConnectionFactory.getLogClient().describe_log_groups())", "response", "=", "AwsConnectionFactory", ".", "getLogClient", "(", ")", ".", "describe_log_groups", "(", "logGroupNamePrefix", ...
Refresh the view of the log group
[ "Refresh", "the", "view", "of", "the", "log", "group" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsLogGroup.py#L19-L44
earlye/nephele
nephele/AwsLogGroup.py
AwsLogGroup.do_logStream
def do_logStream(self,args): """Go to the specified log stream. logStream -h for detailed help""" parser = CommandArgumentParser("logStream") parser.add_argument(dest='logStream',help='logStream index.'); args = vars(parser.parse_args(args)) print "loading log stream {}".format(args['logStream']) index = int(args['logStream']) logStream = self.logStreams[index] print "logStream:{}".format(logStream) self.childLoop(AwsLogStream.AwsLogStream(logStream,self))
python
def do_logStream(self,args): """Go to the specified log stream. logStream -h for detailed help""" parser = CommandArgumentParser("logStream") parser.add_argument(dest='logStream',help='logStream index.'); args = vars(parser.parse_args(args)) print "loading log stream {}".format(args['logStream']) index = int(args['logStream']) logStream = self.logStreams[index] print "logStream:{}".format(logStream) self.childLoop(AwsLogStream.AwsLogStream(logStream,self))
[ "def", "do_logStream", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"logStream\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'logStream'", ",", "help", "=", "'logStream index.'", ")", "args", "=", "vars", "(...
Go to the specified log stream. logStream -h for detailed help
[ "Go", "to", "the", "specified", "log", "stream", ".", "logStream", "-", "h", "for", "detailed", "help" ]
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsLogGroup.py#L58-L69
aerogear/digger-build-cli
digger/builds/__init__.py
from_path
def from_path(path): """ Selects and returns a build class based on project structure/config from a given path. :param path(str): required path argument to be used """ for item in ref: build = ref[item] valid_ = build['is_valid'] if valid_(path) is True: return build['builder'](path) raise errors.InvalidProjectStructure()
python
def from_path(path): """ Selects and returns a build class based on project structure/config from a given path. :param path(str): required path argument to be used """ for item in ref: build = ref[item] valid_ = build['is_valid'] if valid_(path) is True: return build['builder'](path) raise errors.InvalidProjectStructure()
[ "def", "from_path", "(", "path", ")", ":", "for", "item", "in", "ref", ":", "build", "=", "ref", "[", "item", "]", "valid_", "=", "build", "[", "'is_valid'", "]", "if", "valid_", "(", "path", ")", "is", "True", ":", "return", "build", "[", "'builde...
Selects and returns a build class based on project structure/config from a given path. :param path(str): required path argument to be used
[ "Selects", "and", "returns", "a", "build", "class", "based", "on", "project", "structure", "/", "config", "from", "a", "given", "path", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/builds/__init__.py#L35-L46
twneale/visitors
visitors/visitors.py
TypeVisitor.get_nodekey
def get_nodekey(self, token, types=types, coll_abc=collections.abc): '''Given a particular token, check the visitor instance for methods mathing the computed methodnames (the function is a generator). ''' # Check the mro. for mro_type in type(token).__mro__: yield mro_type.__name__ # Check for callability. if callable(token): yield 'callable' # Check for the collections.abc types. abc_types = ( 'Hashable', 'Iterable', 'Iterator', 'Sized', 'Container', 'Callable', 'Set', 'MutableSet', 'Mapping', 'MutableMapping', 'MappingView', 'KeysView', 'ItemsView', 'ValuesView', 'Sequence', 'MutableSequence', 'ByteString') for type_name in abc_types: type_ = getattr(coll_abc, type_name, None) if type_ is None: continue if isinstance(token, type_): yield type_name # Check for the standard interpreter types in the types module. interp_types = ( 'BuiltinFunctionType', 'BuiltinMethodType', 'CodeType', 'DynamicClassAttribute', 'FrameType', 'FunctionType', 'GeneratorType', 'GetSetDescriptorType', 'LambdaType', 'MappingProxyType', 'MemberDescriptorType', 'MethodType', 'ModuleType', 'SimpleNamespace', 'TracebackType') for type_name in interp_types: type_ = getattr(types, type_name, None) if type_ is None: continue if isinstance(token, type_): yield type_name
python
def get_nodekey(self, token, types=types, coll_abc=collections.abc): '''Given a particular token, check the visitor instance for methods mathing the computed methodnames (the function is a generator). ''' # Check the mro. for mro_type in type(token).__mro__: yield mro_type.__name__ # Check for callability. if callable(token): yield 'callable' # Check for the collections.abc types. abc_types = ( 'Hashable', 'Iterable', 'Iterator', 'Sized', 'Container', 'Callable', 'Set', 'MutableSet', 'Mapping', 'MutableMapping', 'MappingView', 'KeysView', 'ItemsView', 'ValuesView', 'Sequence', 'MutableSequence', 'ByteString') for type_name in abc_types: type_ = getattr(coll_abc, type_name, None) if type_ is None: continue if isinstance(token, type_): yield type_name # Check for the standard interpreter types in the types module. interp_types = ( 'BuiltinFunctionType', 'BuiltinMethodType', 'CodeType', 'DynamicClassAttribute', 'FrameType', 'FunctionType', 'GeneratorType', 'GetSetDescriptorType', 'LambdaType', 'MappingProxyType', 'MemberDescriptorType', 'MethodType', 'ModuleType', 'SimpleNamespace', 'TracebackType') for type_name in interp_types: type_ = getattr(types, type_name, None) if type_ is None: continue if isinstance(token, type_): yield type_name
[ "def", "get_nodekey", "(", "self", ",", "token", ",", "types", "=", "types", ",", "coll_abc", "=", "collections", ".", "abc", ")", ":", "# Check the mro.", "for", "mro_type", "in", "type", "(", "token", ")", ".", "__mro__", ":", "yield", "mro_type", ".",...
Given a particular token, check the visitor instance for methods mathing the computed methodnames (the function is a generator).
[ "Given", "a", "particular", "token", "check", "the", "visitor", "instance", "for", "methods", "mathing", "the", "computed", "methodnames", "(", "the", "function", "is", "a", "generator", ")", "." ]
train
https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/visitors.py#L19-L79
twneale/visitors
visitors/visitors.py
StreamVisitor.itervisit
def itervisit( self, iterable, gentype=types.GeneratorType, exhaust_generators=True): '''The main visit function. Visits the passed-in node and calls finalize. ''' self.iterable = iter(iterable) for token in self.iterable: result = self.itervisit_node(token) if exhaust_generators and isinstance(result, gentype): for output in result: yield output elif result is not None: yield result result = self.finalize() if result is self: return if isinstance(result, gentype): for output in result: yield output
python
def itervisit( self, iterable, gentype=types.GeneratorType, exhaust_generators=True): '''The main visit function. Visits the passed-in node and calls finalize. ''' self.iterable = iter(iterable) for token in self.iterable: result = self.itervisit_node(token) if exhaust_generators and isinstance(result, gentype): for output in result: yield output elif result is not None: yield result result = self.finalize() if result is self: return if isinstance(result, gentype): for output in result: yield output
[ "def", "itervisit", "(", "self", ",", "iterable", ",", "gentype", "=", "types", ".", "GeneratorType", ",", "exhaust_generators", "=", "True", ")", ":", "self", ".", "iterable", "=", "iter", "(", "iterable", ")", "for", "token", "in", "self", ".", "iterab...
The main visit function. Visits the passed-in node and calls finalize.
[ "The", "main", "visit", "function", ".", "Visits", "the", "passed", "-", "in", "node", "and", "calls", "finalize", "." ]
train
https://github.com/twneale/visitors/blob/17a2759fb0ddc0a039cf42e1bbb053295b3b2445/visitors/visitors.py#L84-L103
etcher-be/epab
epab/cmd/_print_version.py
print_version
def print_version(ctx: click.Context, _, value): """ Prints current version then exits """ if not value or ctx.resilient_parsing: return print(__version__) sys.exit(0)
python
def print_version(ctx: click.Context, _, value): """ Prints current version then exits """ if not value or ctx.resilient_parsing: return print(__version__) sys.exit(0)
[ "def", "print_version", "(", "ctx", ":", "click", ".", "Context", ",", "_", ",", "value", ")", ":", "if", "not", "value", "or", "ctx", ".", "resilient_parsing", ":", "return", "print", "(", "__version__", ")", "sys", ".", "exit", "(", "0", ")" ]
Prints current version then exits
[ "Prints", "current", "version", "then", "exits" ]
train
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/cmd/_print_version.py#L13-L21
TAPPGuild/bitjws
bitjws/jws.py
base64url_decode
def base64url_decode(msg): """ Decode a base64 message based on JWT spec, Appendix B. "Notes on implementing base64url encoding without padding" """ rem = len(msg) % 4 if rem: msg += b'=' * (4 - rem) return base64.urlsafe_b64decode(msg)
python
def base64url_decode(msg): """ Decode a base64 message based on JWT spec, Appendix B. "Notes on implementing base64url encoding without padding" """ rem = len(msg) % 4 if rem: msg += b'=' * (4 - rem) return base64.urlsafe_b64decode(msg)
[ "def", "base64url_decode", "(", "msg", ")", ":", "rem", "=", "len", "(", "msg", ")", "%", "4", "if", "rem", ":", "msg", "+=", "b'='", "*", "(", "4", "-", "rem", ")", "return", "base64", ".", "urlsafe_b64decode", "(", "msg", ")" ]
Decode a base64 message based on JWT spec, Appendix B. "Notes on implementing base64url encoding without padding"
[ "Decode", "a", "base64", "message", "based", "on", "JWT", "spec", "Appendix", "B", ".", "Notes", "on", "implementing", "base64url", "encoding", "without", "padding" ]
train
https://github.com/TAPPGuild/bitjws/blob/bcf943e0c60985da11fb7895a416525e63728c35/bitjws/jws.py#L49-L58