repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
ihgazni2/edict
edict/edict.py
_keys_via_value_nonrecur
def _keys_via_value_nonrecur(d,v): ''' #non-recursive d = {1:'a',2:'b',3:'a'} _keys_via_value_nonrecur(d,'a') ''' rslt = [] for key in d: if(d[key] == v): rslt.append(key) return(rslt)
python
def _keys_via_value_nonrecur(d,v): ''' #non-recursive d = {1:'a',2:'b',3:'a'} _keys_via_value_nonrecur(d,'a') ''' rslt = [] for key in d: if(d[key] == v): rslt.append(key) return(rslt)
[ "def", "_keys_via_value_nonrecur", "(", "d", ",", "v", ")", ":", "rslt", "=", "[", "]", "for", "key", "in", "d", ":", "if", "(", "d", "[", "key", "]", "==", "v", ")", ":", "rslt", ".", "append", "(", "key", ")", "return", "(", "rslt", ")" ]
#non-recursive d = {1:'a',2:'b',3:'a'} _keys_via_value_nonrecur(d,'a')
[ "#non", "-", "recursive", "d", "=", "{", "1", ":", "a", "2", ":", "b", "3", ":", "a", "}", "_keys_via_value_nonrecur", "(", "d", "a", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L477-L487
ihgazni2/edict
edict/edict.py
_keys_via_value
def _keys_via_value(d,value,**kwargs): ''' d = { 'x': { 'x2': 'x22', 'x1': 'x11' }, 'y': { 'y1': 'v1', 'y2': { 'y4': 'v4', 'y3': 'v3', }, 'xx': { 'x2': 'x22', 'x1': 'x11' } }, 't': 20, 'u': { 'u1': 20 } } ''' km,vm = _d2kvmatrix(d) rvmat = _get_rvmat(d) depth = rvmat.__len__() ## #print(km) ## kdmat = _scankm(km) if('leaf_only' in kwargs): leaf_only = kwargs['leaf_only'] else: leaf_only = False if('non_leaf_only' in kwargs): non_leaf_only = kwargs['non_leaf_only'] else: non_leaf_only = False if('from_lv' in kwargs): from_lv = kwargs['from_lv'] else: from_lv = 1 if('to_lv' in kwargs): to_lv = kwargs['to_lv'] else: if('from_lv' in kwargs): to_lv = from_lv else: to_lv = depth rslt = [] for i in range(from_lv,to_lv): rvlevel = rvmat[i] for j in range(0,rvlevel.__len__()): v = rvlevel[j] cond1 = (v == value) if(leaf_only == True): cond2 = (kdmat[i][j]['leaf'] == True) elif(non_leaf_only == True): cond2 = (kdmat[i][j]['leaf'] == False) else: cond2 = True cond = (cond1 & cond2) if(cond): rslt.append(kdmat[i][j]['path']) else: pass return(rslt)
python
def _keys_via_value(d,value,**kwargs): ''' d = { 'x': { 'x2': 'x22', 'x1': 'x11' }, 'y': { 'y1': 'v1', 'y2': { 'y4': 'v4', 'y3': 'v3', }, 'xx': { 'x2': 'x22', 'x1': 'x11' } }, 't': 20, 'u': { 'u1': 20 } } ''' km,vm = _d2kvmatrix(d) rvmat = _get_rvmat(d) depth = rvmat.__len__() ## #print(km) ## kdmat = _scankm(km) if('leaf_only' in kwargs): leaf_only = kwargs['leaf_only'] else: leaf_only = False if('non_leaf_only' in kwargs): non_leaf_only = kwargs['non_leaf_only'] else: non_leaf_only = False if('from_lv' in kwargs): from_lv = kwargs['from_lv'] else: from_lv = 1 if('to_lv' in kwargs): to_lv = kwargs['to_lv'] else: if('from_lv' in kwargs): to_lv = from_lv else: to_lv = depth rslt = [] for i in range(from_lv,to_lv): rvlevel = rvmat[i] for j in range(0,rvlevel.__len__()): v = rvlevel[j] cond1 = (v == value) if(leaf_only == True): cond2 = (kdmat[i][j]['leaf'] == True) elif(non_leaf_only == True): cond2 = (kdmat[i][j]['leaf'] == False) else: cond2 = True cond = (cond1 & cond2) if(cond): rslt.append(kdmat[i][j]['path']) else: pass return(rslt)
[ "def", "_keys_via_value", "(", "d", ",", "value", ",", "*", "*", "kwargs", ")", ":", "km", ",", "vm", "=", "_d2kvmatrix", "(", "d", ")", "rvmat", "=", "_get_rvmat", "(", "d", ")", "depth", "=", "rvmat", ".", "__len__", "(", ")", "##", "#print(km)",...
d = { 'x': { 'x2': 'x22', 'x1': 'x11' }, 'y': { 'y1': 'v1', 'y2': { 'y4': 'v4', 'y3': 'v3', }, 'xx': { 'x2': 'x22', 'x1': 'x11' } }, 't': 20, 'u': { 'u1': 20 } }
[ "d", "=", "{", "x", ":", "{", "x2", ":", "x22", "x1", ":", "x11", "}", "y", ":", "{", "y1", ":", "v1", "y2", ":", "{", "y4", ":", "v4", "y3", ":", "v3", "}", "xx", ":", "{", "x2", ":", "x22", "x1", ":", "x11", "}", "}", "t", ":", "2...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L492-L564
ihgazni2/edict
edict/edict.py
d2kvlist
def d2kvlist(d): ''' d = {'GPSImgDirectionRef': 'M', 'GPSVersionID': b'\x02\x03\x00\x00', 'GPSImgDirection': (21900, 100)} pobj(d) kl,vl = d2kvlist(d) pobj(kl) pobj(vl) ''' kl = list(d.keys()) vl = list(d.values()) return((kl,vl))
python
def d2kvlist(d): ''' d = {'GPSImgDirectionRef': 'M', 'GPSVersionID': b'\x02\x03\x00\x00', 'GPSImgDirection': (21900, 100)} pobj(d) kl,vl = d2kvlist(d) pobj(kl) pobj(vl) ''' kl = list(d.keys()) vl = list(d.values()) return((kl,vl))
[ "def", "d2kvlist", "(", "d", ")", ":", "kl", "=", "list", "(", "d", ".", "keys", "(", ")", ")", "vl", "=", "list", "(", "d", ".", "values", "(", ")", ")", "return", "(", "(", "kl", ",", "vl", ")", ")" ]
d = {'GPSImgDirectionRef': 'M', 'GPSVersionID': b'\x02\x03\x00\x00', 'GPSImgDirection': (21900, 100)} pobj(d) kl,vl = d2kvlist(d) pobj(kl) pobj(vl)
[ "d", "=", "{", "GPSImgDirectionRef", ":", "M", "GPSVersionID", ":", "b", "\\", "x02", "\\", "x03", "\\", "x00", "\\", "x00", "GPSImgDirection", ":", "(", "21900", "100", ")", "}", "pobj", "(", "d", ")", "kl", "vl", "=", "d2kvlist", "(", "d", ")", ...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L603-L613
ihgazni2/edict
edict/edict.py
klviavl
def klviavl(d,vl): ''' must be 1:1 map ''' dkl,dvl = d2kvlist(d) kl = [] for i in range(vl.__len__()): v = vl[i] index = dvl.index(v) kl.append(dkl[index]) return(kl)
python
def klviavl(d,vl): ''' must be 1:1 map ''' dkl,dvl = d2kvlist(d) kl = [] for i in range(vl.__len__()): v = vl[i] index = dvl.index(v) kl.append(dkl[index]) return(kl)
[ "def", "klviavl", "(", "d", ",", "vl", ")", ":", "dkl", ",", "dvl", "=", "d2kvlist", "(", "d", ")", "kl", "=", "[", "]", "for", "i", "in", "range", "(", "vl", ".", "__len__", "(", ")", ")", ":", "v", "=", "vl", "[", "i", "]", "index", "="...
must be 1:1 map
[ "must", "be", "1", ":", "1", "map" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L633-L643
ihgazni2/edict
edict/edict.py
brkl2d
def brkl2d(arr,interval): ''' arr = ["color1","r1","g1","b1","a1","color2","r2","g2","b2","a2"] >>> brkl2d(arr,5) [{'color1': ['r1', 'g1', 'b1', 'a1']}, {'color2': ['r2', 'g2', 'b2', 'a2']}] ''' lngth = arr.__len__() brkseqs = elel.init_range(0,lngth,interval) l = elel.broken_seqs(arr,brkseqs) d = elel.mapv(l,lambda ele:{ele[0]:ele[1:]}) return(d)
python
def brkl2d(arr,interval): ''' arr = ["color1","r1","g1","b1","a1","color2","r2","g2","b2","a2"] >>> brkl2d(arr,5) [{'color1': ['r1', 'g1', 'b1', 'a1']}, {'color2': ['r2', 'g2', 'b2', 'a2']}] ''' lngth = arr.__len__() brkseqs = elel.init_range(0,lngth,interval) l = elel.broken_seqs(arr,brkseqs) d = elel.mapv(l,lambda ele:{ele[0]:ele[1:]}) return(d)
[ "def", "brkl2d", "(", "arr", ",", "interval", ")", ":", "lngth", "=", "arr", ".", "__len__", "(", ")", "brkseqs", "=", "elel", ".", "init_range", "(", "0", ",", "lngth", ",", "interval", ")", "l", "=", "elel", ".", "broken_seqs", "(", "arr", ",", ...
arr = ["color1","r1","g1","b1","a1","color2","r2","g2","b2","a2"] >>> brkl2d(arr,5) [{'color1': ['r1', 'g1', 'b1', 'a1']}, {'color2': ['r2', 'g2', 'b2', 'a2']}]
[ "arr", "=", "[", "color1", "r1", "g1", "b1", "a1", "color2", "r2", "g2", "b2", "a2", "]", ">>>", "brkl2d", "(", "arr", "5", ")", "[", "{", "color1", ":", "[", "r1", "g1", "b1", "a1", "]", "}", "{", "color2", ":", "[", "r2", "g2", "b2", "a2"...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L662-L672
ihgazni2/edict
edict/edict.py
_vksdesc
def _vksdesc(d): ''' d = {'a':1,'b':2,'c':2,'d':4} desc = _vksdesc(d) pobj(desc) ''' pt = copy.deepcopy(d) seqs_for_del =[] vset = set({}) for k in pt: vset.add(pt[k]) desc = {} for v in vset: desc[v] = [] for k in pt: desc[pt[k]].append(k) return(desc)
python
def _vksdesc(d): ''' d = {'a':1,'b':2,'c':2,'d':4} desc = _vksdesc(d) pobj(desc) ''' pt = copy.deepcopy(d) seqs_for_del =[] vset = set({}) for k in pt: vset.add(pt[k]) desc = {} for v in vset: desc[v] = [] for k in pt: desc[pt[k]].append(k) return(desc)
[ "def", "_vksdesc", "(", "d", ")", ":", "pt", "=", "copy", ".", "deepcopy", "(", "d", ")", "seqs_for_del", "=", "[", "]", "vset", "=", "set", "(", "{", "}", ")", "for", "k", "in", "pt", ":", "vset", ".", "add", "(", "pt", "[", "k", "]", ")",...
d = {'a':1,'b':2,'c':2,'d':4} desc = _vksdesc(d) pobj(desc)
[ "d", "=", "{", "a", ":", "1", "b", ":", "2", "c", ":", "2", "d", ":", "4", "}", "desc", "=", "_vksdesc", "(", "d", ")", "pobj", "(", "desc", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L692-L708
ihgazni2/edict
edict/edict.py
tlist2dict
def tlist2dict(tuple_list,**kwargs): ''' #duplicate keys will lost tl = [(1,2),(3,4),(1,5)] tlist2dict(tl) ''' if('deepcopy' in kwargs): deepcopy = kwargs['deepcopy'] else: deepcopy = 1 if('check' in kwargs): check = kwargs['check'] else: check = 1 if(check): if(tltl.is_tlist(tuple_list)): pass else: return(None) else: pass j = {} if(deepcopy): new = copy.deepcopy(tuple_list) else: new = tuple_list for i in range(0,new.__len__()): temp = new[i] key = temp[0] value = temp[1] j[key] = value return(j)
python
def tlist2dict(tuple_list,**kwargs): ''' #duplicate keys will lost tl = [(1,2),(3,4),(1,5)] tlist2dict(tl) ''' if('deepcopy' in kwargs): deepcopy = kwargs['deepcopy'] else: deepcopy = 1 if('check' in kwargs): check = kwargs['check'] else: check = 1 if(check): if(tltl.is_tlist(tuple_list)): pass else: return(None) else: pass j = {} if(deepcopy): new = copy.deepcopy(tuple_list) else: new = tuple_list for i in range(0,new.__len__()): temp = new[i] key = temp[0] value = temp[1] j[key] = value return(j)
[ "def", "tlist2dict", "(", "tuple_list", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'deepcopy'", "in", "kwargs", ")", ":", "deepcopy", "=", "kwargs", "[", "'deepcopy'", "]", "else", ":", "deepcopy", "=", "1", "if", "(", "'check'", "in", "kwargs", ...
#duplicate keys will lost tl = [(1,2),(3,4),(1,5)] tlist2dict(tl)
[ "#duplicate", "keys", "will", "lost", "tl", "=", "[", "(", "1", "2", ")", "(", "3", "4", ")", "(", "1", "5", ")", "]", "tlist2dict", "(", "tl", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L711-L742
ihgazni2/edict
edict/edict.py
dict2tlist
def dict2tlist(this_dict,**kwargs): ''' #sequence will be losted d = {'a':'b','c':'d'} dict2tlist(d) ''' if('check' in kwargs): check = kwargs['check'] else: check = 1 if(check): if(isinstance(this_dict,dict)): pass else: return(None) else: pass if('deepcopy' in kwargs): deepcopy = kwargs['deepcopy'] else: deepcopy = 1 tuple_list = [] if(deepcopy): new = copy.deepcopy(this_dict) else: new = this_dict i = 0 for key in this_dict: value = this_dict[key] tuple_list.append((key,value)) return(tuple_list)
python
def dict2tlist(this_dict,**kwargs): ''' #sequence will be losted d = {'a':'b','c':'d'} dict2tlist(d) ''' if('check' in kwargs): check = kwargs['check'] else: check = 1 if(check): if(isinstance(this_dict,dict)): pass else: return(None) else: pass if('deepcopy' in kwargs): deepcopy = kwargs['deepcopy'] else: deepcopy = 1 tuple_list = [] if(deepcopy): new = copy.deepcopy(this_dict) else: new = this_dict i = 0 for key in this_dict: value = this_dict[key] tuple_list.append((key,value)) return(tuple_list)
[ "def", "dict2tlist", "(", "this_dict", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'check'", "in", "kwargs", ")", ":", "check", "=", "kwargs", "[", "'check'", "]", "else", ":", "check", "=", "1", "if", "(", "check", ")", ":", "if", "(", "isins...
#sequence will be losted d = {'a':'b','c':'d'} dict2tlist(d)
[ "#sequence", "will", "be", "losted", "d", "=", "{", "a", ":", "b", "c", ":", "d", "}", "dict2tlist", "(", "d", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L744-L774
ihgazni2/edict
edict/edict.py
is_mirrable
def is_mirrable(d): ''' d = {1:'a',2:'a',3:'b'} ''' vl = list(d.values()) lngth1 = vl.__len__() uvl = elel.uniqualize(vl) lngth2 = uvl.__len__() cond = (lngth1 == lngth2) return(cond)
python
def is_mirrable(d): ''' d = {1:'a',2:'a',3:'b'} ''' vl = list(d.values()) lngth1 = vl.__len__() uvl = elel.uniqualize(vl) lngth2 = uvl.__len__() cond = (lngth1 == lngth2) return(cond)
[ "def", "is_mirrable", "(", "d", ")", ":", "vl", "=", "list", "(", "d", ".", "values", "(", ")", ")", "lngth1", "=", "vl", ".", "__len__", "(", ")", "uvl", "=", "elel", ".", "uniqualize", "(", "vl", ")", "lngth2", "=", "uvl", ".", "__len__", "("...
d = {1:'a',2:'a',3:'b'}
[ "d", "=", "{", "1", ":", "a", "2", ":", "a", "3", ":", "b", "}" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L778-L787
ihgazni2/edict
edict/edict.py
dict_mirror
def dict_mirror(d,**kwargs): ''' d = {1:'a',2:'a',3:'b'} ''' md = {} if('sort_func' in kwargs): sort_func = kwargs['sort_func'] else: sort_func = sorted vl = list(d.values()) uvl = elel.uniqualize(vl) for v in uvl: kl = _keys_via_value_nonrecur(d,v) k = sorted(kl)[0] md[v] = k return(md)
python
def dict_mirror(d,**kwargs): ''' d = {1:'a',2:'a',3:'b'} ''' md = {} if('sort_func' in kwargs): sort_func = kwargs['sort_func'] else: sort_func = sorted vl = list(d.values()) uvl = elel.uniqualize(vl) for v in uvl: kl = _keys_via_value_nonrecur(d,v) k = sorted(kl)[0] md[v] = k return(md)
[ "def", "dict_mirror", "(", "d", ",", "*", "*", "kwargs", ")", ":", "md", "=", "{", "}", "if", "(", "'sort_func'", "in", "kwargs", ")", ":", "sort_func", "=", "kwargs", "[", "'sort_func'", "]", "else", ":", "sort_func", "=", "sorted", "vl", "=", "li...
d = {1:'a',2:'a',3:'b'}
[ "d", "=", "{", "1", ":", "a", "2", ":", "a", "3", ":", "b", "}" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L789-L804
ihgazni2/edict
edict/edict.py
_cond_select_key_nonrecur
def _cond_select_key_nonrecur(d,cond_match=None,**kwargs): ''' d = { "ActiveArea":"50829", "Artist":"315", "AsShotPreProfileMatrix":"50832", "AnalogBalance":"50727", "AsShotICCProfile":"50831", "AsShotProfileName":"50934", "AntiAliasStrength":"50738", "AsShotNeutral":"50728", "AsShotWhiteXY":"50729" } _cond_select_key_nonrecur(d,"An") _cond_select_key_nonrecur(d,"As") regex = re.compile("e$") _cond_select_key_nonrecur(d,regex) ''' if('cond_func' in kwargs): cond_func = kwargs['cond_func'] else: cond_func = _text_cond if('cond_func_args' in kwargs): cond_func_args = kwargs['cond_func_args'] else: cond_func_args = [] rslt = {} for key in d: if(cond_func(key,cond_match,*cond_func_args)): rslt[key] = d[key] else: pass return(rslt)
python
def _cond_select_key_nonrecur(d,cond_match=None,**kwargs): ''' d = { "ActiveArea":"50829", "Artist":"315", "AsShotPreProfileMatrix":"50832", "AnalogBalance":"50727", "AsShotICCProfile":"50831", "AsShotProfileName":"50934", "AntiAliasStrength":"50738", "AsShotNeutral":"50728", "AsShotWhiteXY":"50729" } _cond_select_key_nonrecur(d,"An") _cond_select_key_nonrecur(d,"As") regex = re.compile("e$") _cond_select_key_nonrecur(d,regex) ''' if('cond_func' in kwargs): cond_func = kwargs['cond_func'] else: cond_func = _text_cond if('cond_func_args' in kwargs): cond_func_args = kwargs['cond_func_args'] else: cond_func_args = [] rslt = {} for key in d: if(cond_func(key,cond_match,*cond_func_args)): rslt[key] = d[key] else: pass return(rslt)
[ "def", "_cond_select_key_nonrecur", "(", "d", ",", "cond_match", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'cond_func'", "in", "kwargs", ")", ":", "cond_func", "=", "kwargs", "[", "'cond_func'", "]", "else", ":", "cond_func", "=", "_tex...
d = { "ActiveArea":"50829", "Artist":"315", "AsShotPreProfileMatrix":"50832", "AnalogBalance":"50727", "AsShotICCProfile":"50831", "AsShotProfileName":"50934", "AntiAliasStrength":"50738", "AsShotNeutral":"50728", "AsShotWhiteXY":"50729" } _cond_select_key_nonrecur(d,"An") _cond_select_key_nonrecur(d,"As") regex = re.compile("e$") _cond_select_key_nonrecur(d,regex)
[ "d", "=", "{", "ActiveArea", ":", "50829", "Artist", ":", "315", "AsShotPreProfileMatrix", ":", "50832", "AnalogBalance", ":", "50727", "AsShotICCProfile", ":", "50831", "AsShotProfileName", ":", "50934", "AntiAliasStrength", ":", "50738", "AsShotNeutral", ":", "5...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L821-L853
ihgazni2/edict
edict/edict.py
_cond_select_value_nonrecur
def _cond_select_value_nonrecur(d,cond_match=None,**kwargs): ''' d = { "ActiveArea":"50829", "Artist":"315", "AsShotPreProfileMatrix":"50832", "AnalogBalance":"50727", "AsShotICCProfile":"50831", "AsShotProfileName":"50934", "AntiAliasStrength":"50738", "AsShotNeutral":"50728", "AsShotWhiteXY":"50729" } _cond_select_value_nonrecur(d,"50") _cond_select_value_nonrecur(d,"72") regex = re.compile("8$") _cond_select_value_nonrecur(d,regex) ''' if('cond_func' in kwargs): cond_func = kwargs['cond_func'] else: cond_func = _text_cond if('cond_func_args' in kwargs): cond_func_args = kwargs['cond_func_args'] else: cond_func_args = [] rslt = {} for key in d: value = d[key] if(cond_func(value,cond_match,*cond_func_args)): rslt[key] = d[key] else: pass return(rslt)
python
def _cond_select_value_nonrecur(d,cond_match=None,**kwargs): ''' d = { "ActiveArea":"50829", "Artist":"315", "AsShotPreProfileMatrix":"50832", "AnalogBalance":"50727", "AsShotICCProfile":"50831", "AsShotProfileName":"50934", "AntiAliasStrength":"50738", "AsShotNeutral":"50728", "AsShotWhiteXY":"50729" } _cond_select_value_nonrecur(d,"50") _cond_select_value_nonrecur(d,"72") regex = re.compile("8$") _cond_select_value_nonrecur(d,regex) ''' if('cond_func' in kwargs): cond_func = kwargs['cond_func'] else: cond_func = _text_cond if('cond_func_args' in kwargs): cond_func_args = kwargs['cond_func_args'] else: cond_func_args = [] rslt = {} for key in d: value = d[key] if(cond_func(value,cond_match,*cond_func_args)): rslt[key] = d[key] else: pass return(rslt)
[ "def", "_cond_select_value_nonrecur", "(", "d", ",", "cond_match", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'cond_func'", "in", "kwargs", ")", ":", "cond_func", "=", "kwargs", "[", "'cond_func'", "]", "else", ":", "cond_func", "=", "_t...
d = { "ActiveArea":"50829", "Artist":"315", "AsShotPreProfileMatrix":"50832", "AnalogBalance":"50727", "AsShotICCProfile":"50831", "AsShotProfileName":"50934", "AntiAliasStrength":"50738", "AsShotNeutral":"50728", "AsShotWhiteXY":"50729" } _cond_select_value_nonrecur(d,"50") _cond_select_value_nonrecur(d,"72") regex = re.compile("8$") _cond_select_value_nonrecur(d,regex)
[ "d", "=", "{", "ActiveArea", ":", "50829", "Artist", ":", "315", "AsShotPreProfileMatrix", ":", "50832", "AnalogBalance", ":", "50727", "AsShotICCProfile", ":", "50831", "AsShotProfileName", ":", "50934", "AntiAliasStrength", ":", "50738", "AsShotNeutral", ":", "5...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L855-L888
ihgazni2/edict
edict/edict.py
_diff_internal
def _diff_internal(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _diff_internal(d1,d2) _diff_internald2,d1) ''' same =[] kdiff =[] vdiff = [] for key in d1: value = d1[key] if(key in d2): if(value == d2[key]): same.append(key) else: vdiff.append(key) else: kdiff.append(key) return({'same':same,'kdiff':kdiff,'vdiff':vdiff})
python
def _diff_internal(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _diff_internal(d1,d2) _diff_internald2,d1) ''' same =[] kdiff =[] vdiff = [] for key in d1: value = d1[key] if(key in d2): if(value == d2[key]): same.append(key) else: vdiff.append(key) else: kdiff.append(key) return({'same':same,'kdiff':kdiff,'vdiff':vdiff})
[ "def", "_diff_internal", "(", "d1", ",", "d2", ")", ":", "same", "=", "[", "]", "kdiff", "=", "[", "]", "vdiff", "=", "[", "]", "for", "key", "in", "d1", ":", "value", "=", "d1", "[", "key", "]", "if", "(", "key", "in", "d2", ")", ":", "if"...
d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _diff_internal(d1,d2) _diff_internald2,d1)
[ "d1", "=", "{", "a", ":", "x", "b", ":", "y", "c", ":", "z", "}", "d2", "=", "{", "a", ":", "x", "b", ":", "u", "d", ":", "v", "}", "_diff_internal", "(", "d1", "d2", ")", "_diff_internald2", "d1", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L921-L940
ihgazni2/edict
edict/edict.py
_union
def _union(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _union(d1,d2) _union(d2,d1) ''' u = {} ds = _diff_internal(d1,d2) for key in ds['same']: u[key] = d1[key] for key in ds['vdiff']: u[key] = d1[key] for key in ds['kdiff']: u[key] = d1[key] ds = _diff_internal(d2,d1) for key in ds['kdiff']: u[key] = d2[key] return(u)
python
def _union(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _union(d1,d2) _union(d2,d1) ''' u = {} ds = _diff_internal(d1,d2) for key in ds['same']: u[key] = d1[key] for key in ds['vdiff']: u[key] = d1[key] for key in ds['kdiff']: u[key] = d1[key] ds = _diff_internal(d2,d1) for key in ds['kdiff']: u[key] = d2[key] return(u)
[ "def", "_union", "(", "d1", ",", "d2", ")", ":", "u", "=", "{", "}", "ds", "=", "_diff_internal", "(", "d1", ",", "d2", ")", "for", "key", "in", "ds", "[", "'same'", "]", ":", "u", "[", "key", "]", "=", "d1", "[", "key", "]", "for", "key", ...
d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _union(d1,d2) _union(d2,d1)
[ "d1", "=", "{", "a", ":", "x", "b", ":", "y", "c", ":", "z", "}", "d2", "=", "{", "a", ":", "x", "b", ":", "u", "d", ":", "v", "}", "_union", "(", "d1", "d2", ")", "_union", "(", "d2", "d1", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L943-L961
ihgazni2/edict
edict/edict.py
_diff
def _diff(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _diff(d1,d2) _diff(d2,d1) ''' d = {} ds = _diff_internal(d1,d2) for key in ds['vdiff']: d[key] = d1[key] for key in ds['kdiff']: d[key] = d1[key] return(d)
python
def _diff(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _diff(d1,d2) _diff(d2,d1) ''' d = {} ds = _diff_internal(d1,d2) for key in ds['vdiff']: d[key] = d1[key] for key in ds['kdiff']: d[key] = d1[key] return(d)
[ "def", "_diff", "(", "d1", ",", "d2", ")", ":", "d", "=", "{", "}", "ds", "=", "_diff_internal", "(", "d1", ",", "d2", ")", "for", "key", "in", "ds", "[", "'vdiff'", "]", ":", "d", "[", "key", "]", "=", "d1", "[", "key", "]", "for", "key", ...
d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _diff(d1,d2) _diff(d2,d1)
[ "d1", "=", "{", "a", ":", "x", "b", ":", "y", "c", ":", "z", "}", "d2", "=", "{", "a", ":", "x", "b", ":", "u", "d", ":", "v", "}", "_diff", "(", "d1", "d2", ")", "_diff", "(", "d2", "d1", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L965-L978
ihgazni2/edict
edict/edict.py
_intersection
def _intersection(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _intersection(d1,d2) _intersection(d2,d1) ''' i = {} ds = _diff_internal(d1,d2) for key in ds['same']: i[key] = d1[key] return(i)
python
def _intersection(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _intersection(d1,d2) _intersection(d2,d1) ''' i = {} ds = _diff_internal(d1,d2) for key in ds['same']: i[key] = d1[key] return(i)
[ "def", "_intersection", "(", "d1", ",", "d2", ")", ":", "i", "=", "{", "}", "ds", "=", "_diff_internal", "(", "d1", ",", "d2", ")", "for", "key", "in", "ds", "[", "'same'", "]", ":", "i", "[", "key", "]", "=", "d1", "[", "key", "]", "return",...
d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} _intersection(d1,d2) _intersection(d2,d1)
[ "d1", "=", "{", "a", ":", "x", "b", ":", "y", "c", ":", "z", "}", "d2", "=", "{", "a", ":", "x", "b", ":", "u", "d", ":", "v", "}", "_intersection", "(", "d1", "d2", ")", "_intersection", "(", "d2", "d1", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L982-L993
ihgazni2/edict
edict/edict.py
_complement
def _complement(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} complement(d1,d2) complement(d2,d1) ''' u = _union(d1,d2) c = _diff(u,d1) return(c)
python
def _complement(d1,d2): ''' d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} complement(d1,d2) complement(d2,d1) ''' u = _union(d1,d2) c = _diff(u,d1) return(c)
[ "def", "_complement", "(", "d1", ",", "d2", ")", ":", "u", "=", "_union", "(", "d1", ",", "d2", ")", "c", "=", "_diff", "(", "u", ",", "d1", ")", "return", "(", "c", ")" ]
d1 = {'a':'x','b':'y','c':'z'} d2 = {'a':'x','b':'u','d':'v'} complement(d1,d2) complement(d2,d1)
[ "d1", "=", "{", "a", ":", "x", "b", ":", "y", "c", ":", "z", "}", "d2", "=", "{", "a", ":", "x", "b", ":", "u", "d", ":", "v", "}", "complement", "(", "d1", "d2", ")", "complement", "(", "d2", "d1", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L996-L1005
ihgazni2/edict
edict/edict.py
_uniqualize
def _uniqualize(d): ''' d = {1:'a',2:'b',3:'c',4:'b'} _uniqualize(d) ''' pt = copy.deepcopy(d) seqs_for_del =[] vset = set({}) for k in pt: vset.add(pt[k]) tslen = vset.__len__() freq = {} for k in pt: v = pt[k] if(v in freq): freq[v] = freq[v] + 1 seqs_for_del.append(k) else: freq[v] = 0 npt = {} for k in pt: if(k in seqs_for_del): pass else: npt[k] = pt[k] pt = npt return(npt)
python
def _uniqualize(d): ''' d = {1:'a',2:'b',3:'c',4:'b'} _uniqualize(d) ''' pt = copy.deepcopy(d) seqs_for_del =[] vset = set({}) for k in pt: vset.add(pt[k]) tslen = vset.__len__() freq = {} for k in pt: v = pt[k] if(v in freq): freq[v] = freq[v] + 1 seqs_for_del.append(k) else: freq[v] = 0 npt = {} for k in pt: if(k in seqs_for_del): pass else: npt[k] = pt[k] pt = npt return(npt)
[ "def", "_uniqualize", "(", "d", ")", ":", "pt", "=", "copy", ".", "deepcopy", "(", "d", ")", "seqs_for_del", "=", "[", "]", "vset", "=", "set", "(", "{", "}", ")", "for", "k", "in", "pt", ":", "vset", ".", "add", "(", "pt", "[", "k", "]", "...
d = {1:'a',2:'b',3:'c',4:'b'} _uniqualize(d)
[ "d", "=", "{", "1", ":", "a", "2", ":", "b", "3", ":", "c", "4", ":", "b", "}", "_uniqualize", "(", "d", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1007-L1033
ihgazni2/edict
edict/edict.py
_extend
def _extend(dict1,dict2,**kwargs): ''' dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} d = _extend(dict1,dict2) pobj(d) dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} d = _extend(dict1,dict2,overwrite=1) pobj(d) ''' if('deepcopy' in kwargs): deepcopy=kwargs['deepcopy'] else: deepcopy=1 if('overwrite' in kwargs): overwrite=kwargs['overwrite'] else: overwrite=0 if(deepcopy): dict1 = copy.deepcopy(dict1) dict2 = copy.deepcopy(dict2) else: pass d = dict1 for key in dict2: if(key in dict1): if(overwrite): d[key] = dict2[key] else: pass else: d[key] = dict2[key] return(d)
python
def _extend(dict1,dict2,**kwargs): ''' dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} d = _extend(dict1,dict2) pobj(d) dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} d = _extend(dict1,dict2,overwrite=1) pobj(d) ''' if('deepcopy' in kwargs): deepcopy=kwargs['deepcopy'] else: deepcopy=1 if('overwrite' in kwargs): overwrite=kwargs['overwrite'] else: overwrite=0 if(deepcopy): dict1 = copy.deepcopy(dict1) dict2 = copy.deepcopy(dict2) else: pass d = dict1 for key in dict2: if(key in dict1): if(overwrite): d[key] = dict2[key] else: pass else: d[key] = dict2[key] return(d)
[ "def", "_extend", "(", "dict1", ",", "dict2", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'deepcopy'", "in", "kwargs", ")", ":", "deepcopy", "=", "kwargs", "[", "'deepcopy'", "]", "else", ":", "deepcopy", "=", "1", "if", "(", "'overwrite'", "in", ...
dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} d = _extend(dict1,dict2) pobj(d) dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} d = _extend(dict1,dict2,overwrite=1) pobj(d)
[ "dict1", "=", "{", "1", ":", "a", "2", ":", "b", "3", ":", "c", "4", ":", "d", "}", "dict2", "=", "{", "5", ":", "u", "2", ":", "v", "3", ":", "w", "6", ":", "x", "7", ":", "y", "}", "d", "=", "_extend", "(", "dict1", "dict2", ")", ...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1035-L1068
ihgazni2/edict
edict/edict.py
_comprise
def _comprise(dict1,dict2): ''' dict1 = {'a':1,'b':2,'c':3,'d':4} dict2 = {'b':2,'c':3} _comprise(dict1,dict2) ''' len_1 = dict1.__len__() len_2 = dict2.__len__() if(len_2>len_1): return(False) else: for k2 in dict2: v2 = dict2[k2] if(k2 in dict1): v1 = dict1[k2] if(v1 == v2): return(True) else: return(False) else: return(False)
python
def _comprise(dict1,dict2): ''' dict1 = {'a':1,'b':2,'c':3,'d':4} dict2 = {'b':2,'c':3} _comprise(dict1,dict2) ''' len_1 = dict1.__len__() len_2 = dict2.__len__() if(len_2>len_1): return(False) else: for k2 in dict2: v2 = dict2[k2] if(k2 in dict1): v1 = dict1[k2] if(v1 == v2): return(True) else: return(False) else: return(False)
[ "def", "_comprise", "(", "dict1", ",", "dict2", ")", ":", "len_1", "=", "dict1", ".", "__len__", "(", ")", "len_2", "=", "dict2", ".", "__len__", "(", ")", "if", "(", "len_2", ">", "len_1", ")", ":", "return", "(", "False", ")", "else", ":", "for...
dict1 = {'a':1,'b':2,'c':3,'d':4} dict2 = {'b':2,'c':3} _comprise(dict1,dict2)
[ "dict1", "=", "{", "a", ":", "1", "b", ":", "2", "c", ":", "3", "d", ":", "4", "}", "dict2", "=", "{", "b", ":", "2", "c", ":", "3", "}", "_comprise", "(", "dict1", "dict2", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1070-L1090
ihgazni2/edict
edict/edict.py
_update_intersection
def _update_intersection(dict1,dict2,**kwargs): ''' dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} _update_intersection(dict1,dict2) pobj(dict1) pobj(dict2) ''' if('deepcopy' in kwargs): deepcopy = kwargs['deepcopy'] else: deepcopy = 1 if(deepcopy == 1): dict1 = copy.deepcopy(dict1) else: pass for key in dict2: if(key in dict1): dict1[key] = dict2[key] return(dict1)
python
def _update_intersection(dict1,dict2,**kwargs): ''' dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} _update_intersection(dict1,dict2) pobj(dict1) pobj(dict2) ''' if('deepcopy' in kwargs): deepcopy = kwargs['deepcopy'] else: deepcopy = 1 if(deepcopy == 1): dict1 = copy.deepcopy(dict1) else: pass for key in dict2: if(key in dict1): dict1[key] = dict2[key] return(dict1)
[ "def", "_update_intersection", "(", "dict1", ",", "dict2", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'deepcopy'", "in", "kwargs", ")", ":", "deepcopy", "=", "kwargs", "[", "'deepcopy'", "]", "else", ":", "deepcopy", "=", "1", "if", "(", "deepcopy"...
dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} _update_intersection(dict1,dict2) pobj(dict1) pobj(dict2)
[ "dict1", "=", "{", "1", ":", "a", "2", ":", "b", "3", ":", "c", "4", ":", "d", "}", "dict2", "=", "{", "5", ":", "u", "2", ":", "v", "3", ":", "w", "6", ":", "x", "7", ":", "y", "}", "_update_intersection", "(", "dict1", "dict2", ")", "...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1093-L1112
ihgazni2/edict
edict/edict.py
_update
def _update(dict1,dict2,**kwargs):
    '''
        Merge dict2 into dict1, overwriting keys dict1 already has
        (delegates to _extend with overwrite=True).

        dict1 = {1:'a',2:'b',3:'c',4:'d'}
        dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'}
        _update(dict1,dict2)

        kwargs:
            deepcopy  (default 1): work on a deep copy of dict1
    '''
    deepcopy = kwargs.get('deepcopy', 1)
    if(deepcopy == 1):
        dict1 = copy.deepcopy(dict1)
    return(_extend(dict1,dict2,overwrite=True,deepcopy=deepcopy))
python
def _update(dict1,dict2,**kwargs): ''' dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} _update(dict1,dict2) pobj(dict1) pobj(dict2) ''' if('deepcopy' in kwargs): deepcopy = kwargs['deepcopy'] else: deepcopy = 1 if(deepcopy == 1): dict1 = copy.deepcopy(dict1) else: pass dict1 = _extend(dict1,dict2,overwrite=True,deepcopy=deepcopy) return(dict1)
[ "def", "_update", "(", "dict1", ",", "dict2", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'deepcopy'", "in", "kwargs", ")", ":", "deepcopy", "=", "kwargs", "[", "'deepcopy'", "]", "else", ":", "deepcopy", "=", "1", "if", "(", "deepcopy", "==", "...
dict1 = {1:'a',2:'b',3:'c',4:'d'} dict2 = {5:'u',2:'v',3:'w',6:'x',7:'y'} _update(dict1,dict2) pobj(dict1) pobj(dict2)
[ "dict1", "=", "{", "1", ":", "a", "2", ":", "b", "3", ":", "c", "4", ":", "d", "}", "dict2", "=", "{", "5", ":", "u", "2", ":", "v", "3", ":", "w", "6", ":", "x", "7", ":", "y", "}", "_update", "(", "dict1", "dict2", ")", "pobj", "(",...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1114-L1131
ihgazni2/edict
edict/edict.py
_setdefault_via_pathlist
def _setdefault_via_pathlist(external_dict,path_list,**kwargs):
    '''
        Ensure every node along path_list exists in external_dict,
        creating missing nodes from a deep copy of default_element;
        nodes already present are left untouched.

        y = {}
        _setdefault_via_pathlist(y,['c','b'])
        # y == {'c': {'b': {}}}

        kwargs:
            n2s=1            convert each path key to str before use
            s2n=1            convert numeric-looking keys to int
            default_element  template for newly created nodes (default {})
    '''
    s2n = kwargs.get('s2n', 0)
    n2s = kwargs.get('n2s', 0)
    default_element = kwargs.get('default_element', {})
    node = external_dict
    for key in path_list:
        # n2s first, then s2n, matching the other *_via_pathlist helpers
        if(n2s == 1):
            key = str(key)
        if(s2n == 1):
            try:
                key = int(key)
            except:
                pass
        try:
            node = node.__getitem__(key)
        except:
            try:
                # deep-copy so each created node is an independent container
                node.__setitem__(key, copy.deepcopy(default_element))
            except:
                # current node cannot take children (e.g. it is a leaf)
                return(external_dict)
            node = node.__getitem__(key)
    return(external_dict)
python
def _setdefault_via_pathlist(external_dict,path_list,**kwargs): ''' #if path_list already in external_dict, will do nothing y = {} path_list = ['c','b'] _setdefault_via_pathlist(y,path_list) y _setdefault_via_pathlist(y,path_list) y = {} _setdefault_via_pathlist(y,path_list) y ''' if('s2n' in kwargs): s2n = kwargs['s2n'] else: s2n = 0 if('n2s' in kwargs): n2s = kwargs['n2s'] else: n2s = 0 if('default_element' in kwargs): default_element = kwargs['default_element'] else: default_element = {} this = external_dict for i in range(0,path_list.__len__()): key = path_list[i] if(n2s ==1): key = str(key) if(s2n==1): try: int(key) except: pass else: key = int(key) try: this.__getitem__(key) except: try: # necessary ,when default_element = {} or [] de = copy.deepcopy(default_element) this.__setitem__(key,de) except: return(external_dict) else: pass this = this.__getitem__(key) else: this = this.__getitem__(key) return(external_dict)
[ "def", "_setdefault_via_pathlist", "(", "external_dict", ",", "path_list", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'s2n'", "in", "kwargs", ")", ":", "s2n", "=", "kwargs", "[", "'s2n'", "]", "else", ":", "s2n", "=", "0", "if", "(", "'n2s'", "in...
#if path_list already in external_dict, will do nothing y = {} path_list = ['c','b'] _setdefault_via_pathlist(y,path_list) y _setdefault_via_pathlist(y,path_list) y = {} _setdefault_via_pathlist(y,path_list) y
[ "#if", "path_list", "already", "in", "external_dict", "will", "do", "nothing", "y", "=", "{}", "path_list", "=", "[", "c", "b", "]", "_setdefault_via_pathlist", "(", "y", "path_list", ")", "y", "_setdefault_via_pathlist", "(", "y", "path_list", ")", "y", "="...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1135-L1185
ihgazni2/edict
edict/edict.py
_setitem_via_pathlist
def _setitem_via_pathlist(external_dict,path_list,value,**kwargs):
    '''
        Set the value at the nested location described by path_list.

        y = {'c': {'b': {}}}
        _setitem_via_pathlist(y,['c','b'],200)
        # y == {'c': {'b': 200}}

        kwargs:
            n2s=1   convert each path key to str before use
            s2n=1   convert numeric-looking keys to int

        Bugfix: the final key now receives the same n2s/s2n conversion
        as the intermediate keys (it was previously used raw, so e.g.
        n2s=1 produced a mixed str/int key at the leaf).
    '''
    if('s2n' in kwargs):
        s2n = kwargs['s2n']
    else:
        s2n = 0
    if('n2s' in kwargs):
        n2s = kwargs['n2s']
    else:
        n2s = 0
    def _conv(key):
        # n2s first, then s2n, matching the other *_via_pathlist helpers
        if(n2s == 1):
            key = str(key)
        if(s2n == 1):
            try:
                key = int(key)
            except:
                pass
        return(key)
    this = external_dict
    for i in range(0,path_list.__len__()-1):
        this = this.__getitem__(_conv(path_list[i]))
    this.__setitem__(_conv(path_list[-1]),value)
    return(external_dict)
python
def _setitem_via_pathlist(external_dict,path_list,value,**kwargs): ''' y = {'c': {'b': {}}} _setitem_via_pathlist(y,['c','b'],200) ''' if('s2n' in kwargs): s2n = kwargs['s2n'] else: s2n = 0 if('n2s' in kwargs): n2s = kwargs['n2s'] else: n2s = 0 this = external_dict for i in range(0,path_list.__len__()-1): key = path_list[i] if(n2s ==1): key = str(key) if(s2n==1): try: int(key) except: pass else: key = int(key) this = this.__getitem__(key) this.__setitem__(path_list[-1],value) return(external_dict)
[ "def", "_setitem_via_pathlist", "(", "external_dict", ",", "path_list", ",", "value", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'s2n'", "in", "kwargs", ")", ":", "s2n", "=", "kwargs", "[", "'s2n'", "]", "else", ":", "s2n", "=", "0", "if", "(", ...
y = {'c': {'b': {}}} _setitem_via_pathlist(y,['c','b'],200)
[ "y", "=", "{", "c", ":", "{", "b", ":", "{}", "}}", "_setitem_via_pathlist", "(", "y", "[", "c", "b", "]", "200", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1193-L1220
ihgazni2/edict
edict/edict.py
_getitem_via_pathlist
def _getitem_via_pathlist(external_dict,path_list,**kwargs):
    '''
        Walk path_list down the nested dict and return the value found.

        y = {'c': {'b': 200}}
        _getitem_via_pathlist(y,['c','b'])
        # 200

        kwargs:
            n2s=1   convert each path key to str before use
            s2n=1   convert numeric-looking keys to int
    '''
    s2n = kwargs.get('s2n', 0)
    n2s = kwargs.get('n2s', 0)
    node = external_dict
    for key in path_list:
        # n2s first, then s2n, matching the other *_via_pathlist helpers
        if(n2s == 1):
            key = str(key)
        if(s2n == 1):
            try:
                key = int(key)
            except:
                pass
        node = node.__getitem__(key)
    return(node)
python
def _getitem_via_pathlist(external_dict,path_list,**kwargs): ''' y = {'c': {'b': 200}} _getitem_via_pathlist(y,['c','b']) ''' if('s2n' in kwargs): s2n = kwargs['s2n'] else: s2n = 0 if('n2s' in kwargs): n2s = kwargs['n2s'] else: n2s = 0 this = external_dict for i in range(0,path_list.__len__()): key = path_list[i] if(n2s ==1): key = str(key) if(s2n==1): try: int(key) except: pass else: key = int(key) this = this.__getitem__(key) return(this)
[ "def", "_getitem_via_pathlist", "(", "external_dict", ",", "path_list", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'s2n'", "in", "kwargs", ")", ":", "s2n", "=", "kwargs", "[", "'s2n'", "]", "else", ":", "s2n", "=", "0", "if", "(", "'n2s'", "in", ...
y = {'c': {'b': 200}} _getitem_via_pathlist(y,['c','b'])
[ "y", "=", "{", "c", ":", "{", "b", ":", "200", "}}", "_getitem_via_pathlist", "(", "y", "[", "c", "b", "]", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1228-L1254
ihgazni2/edict
edict/edict.py
_include_pathlist
def _include_pathlist(external_dict,path_list,**kwargs):
    '''
        Return True when path_list is a valid key path into
        external_dict, False otherwise.

        y = {
            'a': {'x':88},
            'b': {
                     'x':
                         {'c':66}
                 }
        }
        _include_pathlist(y,['a'])
        _include_pathlist(y,['a','x'])
        _include_pathlist(y,['b','x','c'])

        kwargs:
            n2s=1   convert each path key to str before use
            s2n=1   convert numeric-looking keys to int
    '''
    s2n = kwargs.get('s2n', 0)
    n2s = kwargs.get('n2s', 0)
    node = external_dict
    for key in path_list:
        if(n2s == 1):
            key = str(key)
        if(s2n == 1):
            try:
                key = int(key)
            except:
                pass
        try:
            node = node.__getitem__(key)
        except:
            # lookup failed somewhere along the path
            return(False)
    return(True)
python
def _include_pathlist(external_dict,path_list,**kwargs): ''' y = { 'a': {'x':88}, 'b': { 'x': {'c':66} } } _include_pathlist(y,['a']) _include_pathlist(y,['a','x']) _include_pathlist(y,['b','x','c']) ''' if('s2n' in kwargs): s2n = kwargs['s2n'] else: s2n = 0 if('n2s' in kwargs): n2s = kwargs['n2s'] else: n2s = 0 this = external_dict for i in range(0,path_list.__len__()): key = path_list[i] if(n2s ==1): key = str(key) if(s2n==1): try: int(key) except: pass else: key = int(key) try: this = this.__getitem__(key) except: return(False) else: pass return(True)
[ "def", "_include_pathlist", "(", "external_dict", ",", "path_list", ",", "*", "*", "kwargs", ")", ":", "if", "(", "'s2n'", "in", "kwargs", ")", ":", "s2n", "=", "kwargs", "[", "'s2n'", "]", "else", ":", "s2n", "=", "0", "if", "(", "'n2s'", "in", "k...
y = { 'a': {'x':88}, 'b': { 'x': {'c':66} } } _include_pathlist(y,['a']) _include_pathlist(y,['a','x']) _include_pathlist(y,['b','x','c'])
[ "y", "=", "{", "a", ":", "{", "x", ":", "88", "}", "b", ":", "{", "x", ":", "{", "c", ":", "66", "}", "}", "}", "_include_pathlist", "(", "y", "[", "a", "]", ")", "_include_pathlist", "(", "y", "[", "a", "x", "]", ")", "_include_pathlist", ...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1298-L1339
ihgazni2/edict
edict/edict.py
max_word_width
def max_word_width(myDict):
    '''
        Return the length of the longest value in myDict
        (0 for an empty dict).

        currd = {0:'AutoPauseSpeed', 125:'HRLimitLow', 6:'Activity'}
        max_word_width(currd)
        # 14
    '''
    # builtin max over the value lengths; default covers the empty dict,
    # matching the original manual-loop behavior
    return(max((len(v) for v in myDict.values()), default=0))
python
def max_word_width(myDict): ''' currd = {0:'AutoPauseSpeed', 125:'HRLimitLow', 6:'Activity'} max_wordwidth(currd) ''' maxValueWidth = 0 for each in myDict: eachValueWidth = myDict[each].__len__() if(eachValueWidth > maxValueWidth): maxValueWidth = eachValueWidth return(maxValueWidth)
[ "def", "max_word_width", "(", "myDict", ")", ":", "maxValueWidth", "=", "0", "for", "each", "in", "myDict", ":", "eachValueWidth", "=", "myDict", "[", "each", "]", ".", "__len__", "(", ")", "if", "(", "eachValueWidth", ">", "maxValueWidth", ")", ":", "ma...
currd = {0:'AutoPauseSpeed', 125:'HRLimitLow', 6:'Activity'} max_wordwidth(currd)
[ "currd", "=", "{", "0", ":", "AutoPauseSpeed", "125", ":", "HRLimitLow", "6", ":", "Activity", "}", "max_wordwidth", "(", "currd", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1343-L1353
ihgazni2/edict
edict/edict.py
max_display_width
def max_display_width(myDict):
    '''
        Return the widest display width among the dict's values,
        as measured by str_display_width (0 for an empty dict).

        currd = {0:'你们大家好', 125:'ABCDE', 6:'1234567'}
        max_display_width(currd)
    '''
    widest = 0
    for k in myDict:
        w = str_display_width(myDict[k])
        if(w > widest):
            widest = w
    return(widest)
python
def max_display_width(myDict): ''' currd = {0:'你们大家好', 125:'ABCDE', 6:'1234567'} dict_get_max_word_displaywidth(currd) ''' maxValueWidth = 0 for each in myDict: eachValueWidth = str_display_width(myDict[each]) if(eachValueWidth > maxValueWidth): maxValueWidth = eachValueWidth return(maxValueWidth)
[ "def", "max_display_width", "(", "myDict", ")", ":", "maxValueWidth", "=", "0", "for", "each", "in", "myDict", ":", "eachValueWidth", "=", "str_display_width", "(", "myDict", "[", "each", "]", ")", "if", "(", "eachValueWidth", ">", "maxValueWidth", ")", ":",...
currd = {0:'你们大家好', 125:'ABCDE', 6:'1234567'} dict_get_max_word_displaywidth(currd)
[ "currd", "=", "{", "0", ":", "你们大家好", "125", ":", "ABCDE", "6", ":", "1234567", "}", "dict_get_max_word_displaywidth", "(", "currd", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1355-L1365
ihgazni2/edict
edict/edict.py
is_leaf
def is_leaf(obj):
    '''
        Leaf test for nested-dict traversal:
        any non-dict is a leaf, and an empty dict is a leaf.

        from edict.edict import *
        is_leaf(1)      # True
        is_leaf({1:2})  # False
        is_leaf({})     # True
    '''
    if(not is_dict(obj)):
        return(True)
    # a dict is a leaf only when it has no entries
    return(obj.__len__() == 0)
python
def is_leaf(obj): ''' the below is for nested-dict any type is not dict will be treated as a leaf empty dict will be treated as a leaf from edict.edict import * is_leaf(1) is_leaf({1:2}) is_leaf({}) ''' if(is_dict(obj)): length = obj.__len__() if(length == 0): return(True) else: return(False) else: return(True)
[ "def", "is_leaf", "(", "obj", ")", ":", "if", "(", "is_dict", "(", "obj", ")", ")", ":", "length", "=", "obj", ".", "__len__", "(", ")", "if", "(", "length", "==", "0", ")", ":", "return", "(", "True", ")", "else", ":", "return", "(", "False", ...
the below is for nested-dict any type is not dict will be treated as a leaf empty dict will be treated as a leaf from edict.edict import * is_leaf(1) is_leaf({1:2}) is_leaf({})
[ "the", "below", "is", "for", "nested", "-", "dict", "any", "type", "is", "not", "dict", "will", "be", "treated", "as", "a", "leaf", "empty", "dict", "will", "be", "treated", "as", "a", "leaf", "from", "edict", ".", "edict", "import", "*", "is_leaf", ...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1382-L1399
ihgazni2/edict
edict/edict.py
_d2kvmatrix
def _d2kvmatrix(d):
    '''
        Decompose a nested dict into a key matrix and a value matrix
        via a breadth-first walk.

        d = {1: 2, 3: {'a': 'b'}}
        km,vm = _d2kvmatrix(d)
        d = {1: {2:{22:222}}, 3: {'a': 'b'}}
        km,vm = _d2kvmatrix(d)
        ##
        km: stores key path-lists per depth level (levels start at 0):
            {
                1: 2,
                3: {
                    'a': 'b'
                }
            }
            km[0] = [[1],[3]]
            km[1] = [[3,'a']]
        vm: has a special nested layout that mirrors the depth levels;
            see elel.get_wfs and _kvmatrix2d for how it is consumed
        rvmat: parallels km, holding the value for each key, but shifted
            one level deeper than km
    '''
    km = []
    # seed the value matrix with the top-level values
    vm = [list(d.values())]
    # maps index-in-unhandled -> path into vm where that node's values go
    vm_history ={0:[0]}
    # BFS queue of {'data': sub-dict, 'kpl': key path leading to it}
    unhandled = [{'data':d,'kpl':[]}]
    while(unhandled.__len__()>0):
        next_unhandled = []
        keys_level = []
        next_vm_history = {}
        for i in range(0,unhandled.__len__()):
            data = unhandled[i]['data']
            kpl = unhandled[i]['kpl']
            values = list(data.values())
            # write this node's values into vm at its recorded path
            _setitem_via_pathlist(vm,vm_history[i],values)
            vm_pl = vm_history[i]
            del vm_history[i]
            keys = data.keys()
            # extend each key into a full path-list rooted at kpl
            keys = elel.array_map(keys,_gen_sonpl,kpl)
            keys_level.extend(keys)
            for j in range(0,values.__len__()):
                v = values[j]
                cond = is_leaf(v)
                if(cond):
                    pass
                else:
                    # non-leaf value: queue the sub-dict for the next level
                    # and remember where its values will live inside vm
                    kpl = copy.deepcopy(keys[j])
                    next_unhandled.append({'data':v,'kpl':kpl})
                    vpl = copy.deepcopy(vm_pl)
                    vpl.append(j)
                    next_vm_history[next_unhandled.__len__()-1] = vpl
        vm_history = next_vm_history
        km.append(keys_level)
        unhandled = next_unhandled
    vm = vm[0]
    return((km,vm))
python
def _d2kvmatrix(d): ''' d = {1: 2, 3: {'a': 'b'}} km,vm = _d2kvmatrix(d) d = {1: {2:{22:222}}, 3: {'a': 'b'}} km,vm = _d2kvmatrix(d) ## km: 按照层次存储pathlist,层次从0开始, { 1: 2, 3: { 'a': 'b' } } km[0] = [[1],[3]] km[1] = [[3,'a']] vm: vm比较特殊,不太好理解,请参照函数elel.get_wfs 和_kvmatrix2d vm的数组表示层次 rvmat: 与km对应,存储key对应的value,不过对应层次使km的层次+1 ''' km = [] vm = [list(d.values())] vm_history ={0:[0]} unhandled = [{'data':d,'kpl':[]}] while(unhandled.__len__()>0): next_unhandled = [] keys_level = [] next_vm_history = {} for i in range(0,unhandled.__len__()): data = unhandled[i]['data'] kpl = unhandled[i]['kpl'] values = list(data.values()) _setitem_via_pathlist(vm,vm_history[i],values) vm_pl = vm_history[i] del vm_history[i] keys = data.keys() keys = elel.array_map(keys,_gen_sonpl,kpl) keys_level.extend(keys) for j in range(0,values.__len__()): v = values[j] cond = is_leaf(v) if(cond): pass else: kpl = copy.deepcopy(keys[j]) next_unhandled.append({'data':v,'kpl':kpl}) vpl = copy.deepcopy(vm_pl) vpl.append(j) next_vm_history[next_unhandled.__len__()-1] = vpl vm_history = next_vm_history km.append(keys_level) unhandled = next_unhandled vm = vm[0] return((km,vm))
[ "def", "_d2kvmatrix", "(", "d", ")", ":", "km", "=", "[", "]", "vm", "=", "[", "list", "(", "d", ".", "values", "(", ")", ")", "]", "vm_history", "=", "{", "0", ":", "[", "0", "]", "}", "unhandled", "=", "[", "{", "'data'", ":", "d", ",", ...
d = {1: 2, 3: {'a': 'b'}} km,vm = _d2kvmatrix(d) d = {1: {2:{22:222}}, 3: {'a': 'b'}} km,vm = _d2kvmatrix(d) ## km: 按照层次存储pathlist,层次从0开始, { 1: 2, 3: { 'a': 'b' } } km[0] = [[1],[3]] km[1] = [[3,'a']] vm: vm比较特殊,不太好理解,请参照函数elel.get_wfs 和_kvmatrix2d vm的数组表示层次 rvmat: 与km对应,存储key对应的value,不过对应层次使km的层次+1
[ "d", "=", "{", "1", ":", "2", "3", ":", "{", "a", ":", "b", "}}", "km", "vm", "=", "_d2kvmatrix", "(", "d", ")", "d", "=", "{", "1", ":", "{", "2", ":", "{", "22", ":", "222", "}}", "3", ":", "{", "a", ":", "b", "}}", "km", "vm", "=...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1433-L1487
ihgazni2/edict
edict/edict.py
show_kmatrix
def show_kmatrix(km):
    '''
        Print every key path stored in the level matrix km and
        return them as one flat list.

        d = {1: {2: {22: 222}}, 3: {'a': 'b'}}
        km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]]
        show_kmatrix(km)
    '''
    flat = []
    for level in km:
        for kpl in level:
            print(kpl)
            flat.append(kpl)
    return(flat)
python
def show_kmatrix(km): ''' d = {1: {2: {22: 222}}, 3: {'a': 'b'}} km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]] show_kmatrix(km) ''' rslt = [] for i in range(0,km.__len__()): level = km[i] for j in range(0,level.__len__()): kpl = level[j] print(kpl) rslt.append(kpl) return(rslt)
[ "def", "show_kmatrix", "(", "km", ")", ":", "rslt", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "km", ".", "__len__", "(", ")", ")", ":", "level", "=", "km", "[", "i", "]", "for", "j", "in", "range", "(", "0", ",", "level", "."...
d = {1: {2: {22: 222}}, 3: {'a': 'b'}} km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]] show_kmatrix(km)
[ "d", "=", "{", "1", ":", "{", "2", ":", "{", "22", ":", "222", "}}", "3", ":", "{", "a", ":", "b", "}}", "km", "=", "[[[", "1", "]", "[", "3", "]]", "[[", "1", "2", "]", "[", "3", "a", "]]", "[[", "1", "2", "22", "]]]", "show_kmatrix"...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1489-L1502
ihgazni2/edict
edict/edict.py
show_vmatrix
def show_vmatrix(vm):
    '''
        Print each node of the nested value matrix, walking it
        breadth-first level by level.

        d = {1: {2: {22: 222}}, 3: {'a': 'b'}}
        vm = [[[222]], ['b']]
        show_vmatrix(vm)
    '''
    unhandled = vm
    while(unhandled.__len__()>0):
        next_unhandled = []
        for i in range(0,unhandled.__len__()):
            ele = unhandled[i]
            print(ele)
            cond = elel.is_leaf(ele)
            if(cond):
                pass
            else:
                # non-leaf elements carry their children as their first item
                children = ele[0]
                next_unhandled.append(children)
        unhandled = next_unhandled
python
def show_vmatrix(vm): ''' d = {1: {2: {22: 222}}, 3: {'a': 'b'}} vm = [[[222]], ['b']] show_vmatrix(vm) ''' unhandled = vm while(unhandled.__len__()>0): next_unhandled = [] for i in range(0,unhandled.__len__()): ele = unhandled[i] print(ele) cond = elel.is_leaf(ele) if(cond): pass else: children = ele[0] next_unhandled.append(children) unhandled = next_unhandled
[ "def", "show_vmatrix", "(", "vm", ")", ":", "unhandled", "=", "vm", "while", "(", "unhandled", ".", "__len__", "(", ")", ">", "0", ")", ":", "next_unhandled", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "unhandled", ".", "__len__", "("...
d = {1: {2: {22: 222}}, 3: {'a': 'b'}} vm = [[[222]], ['b']] show_vmatrix(vm)
[ "d", "=", "{", "1", ":", "{", "2", ":", "{", "22", ":", "222", "}}", "3", ":", "{", "a", ":", "b", "}}", "vm", "=", "[[[", "222", "]]", "[", "b", "]]", "show_vmatrix", "(", "vm", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1504-L1522
ihgazni2/edict
edict/edict.py
_kvmatrix2d
def _kvmatrix2d(km,vm):
    '''
        Rebuild the nested dict from its key matrix and value matrix
        (the inverse of _d2kvmatrix).

        km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]]
        show_kmatrix(km)
        vm = [[[222]], ['b']]
        show_vmatrix(vm)
        d = _kvmatrix2d(km,vm)
    '''
    d = {}
    # width-first sequences over both matrices line up one-to-one:
    # kmwfs[i] is the key path whose value sits at vmwfs[i] inside vm
    kmwfs = get_kmwfs(km)
    vmwfs = elel.get_wfs(vm)
    lngth = vmwfs.__len__()
    for i in range(0,lngth):
        value = elel.getitem_via_pathlist(vm,vmwfs[i])
        cond = elel.is_leaf(value)
        if(cond):
            # leaf value: store it at the corresponding key path
            _setitem_via_pathlist(d,kmwfs[i],value)
        else:
            # non-leaf: just make sure the intermediate node exists
            _setdefault_via_pathlist(d,kmwfs[i])
    return(d)
python
def _kvmatrix2d(km,vm): ''' km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]] show_kmatrix(km) vm = [[[222]], ['b']] show_vmatrix(vm) d = _kvmatrix2d(km,vm) ''' d = {} kmwfs = get_kmwfs(km) vmwfs = elel.get_wfs(vm) lngth = vmwfs.__len__() for i in range(0,lngth): value = elel.getitem_via_pathlist(vm,vmwfs[i]) cond = elel.is_leaf(value) if(cond): _setitem_via_pathlist(d,kmwfs[i],value) else: _setdefault_via_pathlist(d,kmwfs[i]) return(d)
[ "def", "_kvmatrix2d", "(", "km", ",", "vm", ")", ":", "d", "=", "{", "}", "kmwfs", "=", "get_kmwfs", "(", "km", ")", "vmwfs", "=", "elel", ".", "get_wfs", "(", "vm", ")", "lngth", "=", "vmwfs", ".", "__len__", "(", ")", "for", "i", "in", "range...
km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]] show_kmatrix(km) vm = [[[222]], ['b']] show_vmatrix(vm) d = _kvmatrix2d(km,vm)
[ "km", "=", "[[[", "1", "]", "[", "3", "]]", "[[", "1", "2", "]", "[", "3", "a", "]]", "[[", "1", "2", "22", "]]]", "show_kmatrix", "(", "km", ")", "vm", "=", "[[[", "222", "]]", "[", "b", "]]", "show_vmatrix", "(", "vm", ")", "d", "=", "_...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1908-L1929
ihgazni2/edict
edict/edict.py
_get_rvmat
def _get_rvmat(d):
    '''
        Build the "real value" matrix: for every key path in the key
        matrix of d, look up the value it addresses in d. The result is
        prepended with an empty level so rvmat levels sit one deeper
        than the corresponding km levels.

        d = {
            'x': {
                'x2': 'x22',
                'x1': 'x11'
            },
            'y': {
                'y1': 'v1',
                'y2': {
                    'y4': 'v4',
                    'y3': 'v3'
                }
            },
            't': 20,
            'u': {
                'u1': 'u2'
            }
        }
    '''
    km,vm = _d2kvmatrix(d)
    def map_func(ele,indexc,indexr):
        # ele is a key path-list; resolve it against the original dict
        return(_getitem_via_pathlist(d,ele))
    rvmat = elel.matrix_map(km,map_func)
    # shift by one level: rvmat[i] holds values for the keys at km[i-1]
    rvmat = elel.prepend(rvmat,[])
    return(rvmat)
python
def _get_rvmat(d): ''' d = { 'x': { 'x2': 'x22', 'x1': 'x11' }, 'y': { 'y1': 'v1', 'y2': { 'y4': 'v4', 'y3': 'v3' } }, 't': 20, 'u': { 'u1': 'u2' } } ''' km,vm = _d2kvmatrix(d) def map_func(ele,indexc,indexr): return(_getitem_via_pathlist(d,ele)) rvmat = elel.matrix_map(km,map_func) rvmat = elel.prepend(rvmat,[]) return(rvmat)
[ "def", "_get_rvmat", "(", "d", ")", ":", "km", ",", "vm", "=", "_d2kvmatrix", "(", "d", ")", "def", "map_func", "(", "ele", ",", "indexc", ",", "indexr", ")", ":", "return", "(", "_getitem_via_pathlist", "(", "d", ",", "ele", ")", ")", "rvmat", "="...
d = { 'x': { 'x2': 'x22', 'x1': 'x11' }, 'y': { 'y1': 'v1', 'y2': { 'y4': 'v4', 'y3': 'v3' } }, 't': 20, 'u': { 'u1': 'u2' } }
[ "d", "=", "{", "x", ":", "{", "x2", ":", "x22", "x1", ":", "x11", "}", "y", ":", "{", "y1", ":", "v1", "y2", ":", "{", "y4", ":", "v4", "y3", ":", "v3", "}", "}", "t", ":", "20", "u", ":", "{", "u1", ":", "u2", "}", "}" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L1962-L1993
ihgazni2/edict
edict/edict.py
get_vndmat_attr
def get_vndmat_attr(d,keypath,attr,**kwargs):
    '''
        Look up an attribute (e.g. 'lsib_path') of the value-tree node
        addressed by keypath, optionally translating the returned
        path(s) back to list-tree locations (path2loc) or to key paths
        of d (path2keypath).

        get_vndmat_attr(d,['x'],'lsib_path',path2keypath=True)
        get_vndmat_attr(d,['t'],'lsib_path',path2keypath=True)
        get_vndmat_attr(d,['u'],'lsib_path',path2keypath=True)
        get_vndmat_attr(d,['y'],'lsib_path',path2keypath=True)
    '''
    kt,vn = _d2kvmatrix(d)
    kdmat = _scankm(kt)
    ltree = elel.ListTree(vn)
    vndmat = ltree.desc
    # locate the node for keypath, then read the requested attribute
    loc = get_kdmat_loc(kdmat,keypath)
    rslt = vndmat[loc[0]][loc[1]][attr]
    if(rslt == None):
        pass
    elif(elel.is_matrix(rslt,mode='loose')):
        # attribute holds a list of paths: convert each one as requested
        if('path2loc' in kwargs):
            rslt = elel.array_map(rslt,ltree.path2loc)
        else:
            pass
        if('path2keypath' in kwargs):
            nlocs = elel.array_map(rslt,ltree.path2loc)
            def cond_func(ele,kdmat):
                # map a (level, index) location back to its key path
                return(kdmat[ele[0]][ele[1]]['path'])
            rslt = elel.array_map(nlocs,cond_func,kdmat)
        else:
            pass
    else:
        # attribute holds a single path: same conversions, scalar form
        if('path2loc' in kwargs):
            rslt = ltree.path2loc(rslt)
        else:
            pass
        if('path2keypath' in kwargs):
            nloc = ltree.path2loc(rslt)
            rslt = kdmat[nloc[0]][nloc[1]]['path']
        else:
            pass
    return(rslt)
python
def get_vndmat_attr(d,keypath,attr,**kwargs): ''' get_vndmat_attr(d,['x'],'lsib_path',path2keypath=True) get_vndmat_attr(d,['t'],'lsib_path',path2keypath=True) get_vndmat_attr(d,['u'],'lsib_path',path2keypath=True) get_vndmat_attr(d,['y'],'lsib_path',path2keypath=True) ''' kt,vn = _d2kvmatrix(d) kdmat = _scankm(kt) ltree = elel.ListTree(vn) vndmat = ltree.desc loc = get_kdmat_loc(kdmat,keypath) rslt = vndmat[loc[0]][loc[1]][attr] if(rslt == None): pass elif(elel.is_matrix(rslt,mode='loose')): if('path2loc' in kwargs): rslt = elel.array_map(rslt,ltree.path2loc) else: pass if('path2keypath' in kwargs): nlocs = elel.array_map(rslt,ltree.path2loc) def cond_func(ele,kdmat): return(kdmat[ele[0]][ele[1]]['path']) rslt = elel.array_map(nlocs,cond_func,kdmat) else: pass else: if('path2loc' in kwargs): rslt = ltree.path2loc(rslt) else: pass if('path2keypath' in kwargs): nloc = ltree.path2loc(rslt) rslt = kdmat[nloc[0]][nloc[1]]['path'] else: pass return(rslt)
[ "def", "get_vndmat_attr", "(", "d", ",", "keypath", ",", "attr", ",", "*", "*", "kwargs", ")", ":", "kt", ",", "vn", "=", "_d2kvmatrix", "(", "d", ")", "kdmat", "=", "_scankm", "(", "kt", ")", "ltree", "=", "elel", ".", "ListTree", "(", "vn", ")"...
get_vndmat_attr(d,['x'],'lsib_path',path2keypath=True) get_vndmat_attr(d,['t'],'lsib_path',path2keypath=True) get_vndmat_attr(d,['u'],'lsib_path',path2keypath=True) get_vndmat_attr(d,['y'],'lsib_path',path2keypath=True)
[ "get_vndmat_attr", "(", "d", "[", "x", "]", "lsib_path", "path2keypath", "=", "True", ")", "get_vndmat_attr", "(", "d", "[", "t", "]", "lsib_path", "path2keypath", "=", "True", ")", "get_vndmat_attr", "(", "d", "[", "u", "]", "lsib_path", "path2keypath", "...
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L2037-L2074
ihgazni2/edict
edict/edict.py
Edict.sub_via_value
def sub_via_value(self,*vs,**kwargs):
    '''
        Build a new Edict holding only the top-level entries whose
        value is one of vs.

        d= {1:'a',2:'b',3:'a',4:'d',5:'e'}
        ed = eded.Edict(d)
        ed.sub_via_value('a','d')
    '''
    return(Edict(_select_norecur_via_value(self.dict,*vs,**kwargs)))
python
def sub_via_value(self,*vs,**kwargs): ''' d= {1:'a',2:'b',3:'a',4:'d',5:'e'} ed = eded.Edict(d) ed.sub_via_value('a','d') ''' sd = _select_norecur_via_value(self.dict,*vs,**kwargs) return(Edict(sd))
[ "def", "sub_via_value", "(", "self", ",", "*", "vs", ",", "*", "*", "kwargs", ")", ":", "sd", "=", "_select_norecur_via_value", "(", "self", ".", "dict", ",", "*", "vs", ",", "*", "*", "kwargs", ")", "return", "(", "Edict", "(", "sd", ")", ")" ]
d= {1:'a',2:'b',3:'a',4:'d',5:'e'} ed = eded.Edict(d) ed.sub_via_value('a','d')
[ "d", "=", "{", "1", ":", "a", "2", ":", "b", "3", ":", "a", "4", ":", "d", "5", ":", "e", "}", "ed", "=", "eded", ".", "Edict", "(", "d", ")", "ed", ".", "sub_via_value", "(", "a", "d", ")" ]
train
https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L2112-L2119
gkmngrgn/radpress
radpress/templatetags/radpress_tags.py
radpress_get_markup_descriptions
def radpress_get_markup_descriptions():
    """
    Provides markup options. It used for adding descriptions in admin and
    zen mode.

    :return: list
    """
    # each markup choice is a (name, title) pair; the reader supplies
    # the human-readable description text
    return [
        {
            'name': markup[0],
            'title': markup[1],
            'description': trim(get_reader(markup=markup[0]).description),
        }
        for markup in get_markup_choices()
    ]
python
def radpress_get_markup_descriptions(): """ Provides markup options. It used for adding descriptions in admin and zen mode. :return: list """ result = [] for markup in get_markup_choices(): markup_name = markup[0] result.append({ 'name': markup_name, 'title': markup[1], 'description': trim(get_reader(markup=markup_name).description) }) return result
[ "def", "radpress_get_markup_descriptions", "(", ")", ":", "result", "=", "[", "]", "for", "markup", "in", "get_markup_choices", "(", ")", ":", "markup_name", "=", "markup", "[", "0", "]", "result", ".", "append", "(", "{", "'name'", ":", "markup_name", ","...
Provides markup options. It used for adding descriptions in admin and zen mode. :return: list
[ "Provides", "markup", "options", ".", "It", "used", "for", "adding", "descriptions", "in", "admin", "and", "zen", "mode", "." ]
train
https://github.com/gkmngrgn/radpress/blob/2ed3b97f94e722479601832ffc40ea2135cda916/radpress/templatetags/radpress_tags.py#L46-L61
knowmalware/camcrypt
camcrypt/utils.py
_get_parser
def _get_parser(description):
    """Build an ArgumentParser with common arguments for both operations."""
    p = argparse.ArgumentParser(description=description)
    # positional arguments
    p.add_argument('key', help="Camellia key.")
    p.add_argument('input_file', nargs='*',
                   help="File(s) to read as input data. If none are "
                        "provided, assume STDIN.")
    # optional arguments
    p.add_argument('-o', '--output_file',
                   help="Output file. If not provided, assume STDOUT.")
    p.add_argument('-l', '--keylen', type=int, default=128,
                   help="Length of 'key' in bits, must be in one of %s "
                        "(default 128)." % camcrypt.ACCEPTABLE_KEY_LENGTHS)
    p.add_argument('-H', '--hexkey', action='store_true',
                   help="Treat 'key' as a hex string rather than binary.")
    return p
python
def _get_parser(description): """Build an ArgumentParser with common arguments for both operations.""" parser = argparse.ArgumentParser(description=description) parser.add_argument('key', help="Camellia key.") parser.add_argument('input_file', nargs='*', help="File(s) to read as input data. If none are " "provided, assume STDIN.") parser.add_argument('-o', '--output_file', help="Output file. If not provided, assume STDOUT.") parser.add_argument('-l', '--keylen', type=int, default=128, help="Length of 'key' in bits, must be in one of %s " "(default 128)." % camcrypt.ACCEPTABLE_KEY_LENGTHS) parser.add_argument('-H', '--hexkey', action='store_true', help="Treat 'key' as a hex string rather than binary.") return parser
[ "def", "_get_parser", "(", "description", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "description", ")", "parser", ".", "add_argument", "(", "'key'", ",", "help", "=", "\"Camellia key.\"", ")", "parser", ".", "add_argu...
Build an ArgumentParser with common arguments for both operations.
[ "Build", "an", "ArgumentParser", "with", "common", "arguments", "for", "both", "operations", "." ]
train
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/utils.py#L11-L26
knowmalware/camcrypt
camcrypt/utils.py
_get_crypto
def _get_crypto(keylen, hexkey, key):
    """Return a camcrypt.CamCrypt object based on keylen, hexkey, and key.

    Args:
        keylen (int): Key length in bits; must be a member of
            camcrypt.ACCEPTABLE_KEY_LENGTHS.
        hexkey (bool): If True, treat 'key' as a hex string and decode it.
        key: The Camellia key material.

    Raises:
        ValueError: If keylen is not an acceptable key length.
    """
    if keylen not in camcrypt.ACCEPTABLE_KEY_LENGTHS:
        raise ValueError("key length must be one of 128, 192, or 256")
    if hexkey:
        # binascii.unhexlify works on both Python 2 and 3;
        # str.decode('hex') is Python-2-only and breaks on Python 3
        import binascii
        key = binascii.unhexlify(key)
    return camcrypt.CamCrypt(keylen=keylen, key=key)
python
def _get_crypto(keylen, hexkey, key): """Return a camcrypt.CamCrypt object based on keylen, hexkey, and key.""" if keylen not in camcrypt.ACCEPTABLE_KEY_LENGTHS: raise ValueError("key length must be one of 128, 192, or 256") if hexkey: key = key.decode('hex') return camcrypt.CamCrypt(keylen=keylen, key=key)
[ "def", "_get_crypto", "(", "keylen", ",", "hexkey", ",", "key", ")", ":", "if", "keylen", "not", "in", "camcrypt", ".", "ACCEPTABLE_KEY_LENGTHS", ":", "raise", "ValueError", "(", "\"key length must be one of 128, 192, or 256\"", ")", "if", "hexkey", ":", "key", ...
Return a camcrypt.CamCrypt object based on keylen, hexkey, and key.
[ "Return", "a", "camcrypt", ".", "CamCrypt", "object", "based", "on", "keylen", "hexkey", "and", "key", "." ]
train
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/utils.py#L28-L36
knowmalware/camcrypt
camcrypt/utils.py
_get_data
def _get_data(filenames):
    """Read data from file(s) or STDIN.

    Args:
        filenames (list): List of files to read to get data. If empty or
            None, read from STDIN.

    Returns:
        The concatenated file contents (bytes, since files are opened in
        binary mode), or whatever sys.stdin.read() yields.
    """
    if filenames:
        chunks = []
        for filename in filenames:
            with open(filename, "rb") as f:
                chunks.append(f.read())
        # join bytes chunks; the previous '"" += f.read()' accumulator
        # raised TypeError (str + bytes) on Python 3
        data = b"".join(chunks)
    else:
        data = sys.stdin.read()
    return data
python
def _get_data(filenames): """Read data from file(s) or STDIN. Args: filenames (list): List of files to read to get data. If empty or None, read from STDIN. """ if filenames: data = "" for filename in filenames: with open(filename, "rb") as f: data += f.read() else: data = sys.stdin.read() return data
[ "def", "_get_data", "(", "filenames", ")", ":", "if", "filenames", ":", "data", "=", "\"\"", "for", "filename", "in", "filenames", ":", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "data", "+=", "f", ".", "read", "(", ")", "...
Read data from file(s) or STDIN. Args: filenames (list): List of files to read to get data. If empty or None, read from STDIN.
[ "Read", "data", "from", "file", "(", "s", ")", "or", "STDIN", "." ]
train
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/utils.py#L38-L53
knowmalware/camcrypt
camcrypt/utils.py
_print_results
def _print_results(filename, data): """Print data to a file or STDOUT. Args: filename (str or None): If None, print to STDOUT; otherwise, print to the file with this name. data (str): Data to print. """ if filename: with open(filename, 'wb') as f: f.write(data) else: print data
python
def _print_results(filename, data): """Print data to a file or STDOUT. Args: filename (str or None): If None, print to STDOUT; otherwise, print to the file with this name. data (str): Data to print. """ if filename: with open(filename, 'wb') as f: f.write(data) else: print data
[ "def", "_print_results", "(", "filename", ",", "data", ")", ":", "if", "filename", ":", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "data", ")", "else", ":", "print", "data" ]
Print data to a file or STDOUT. Args: filename (str or None): If None, print to STDOUT; otherwise, print to the file with this name. data (str): Data to print.
[ "Print", "data", "to", "a", "file", "or", "STDOUT", "." ]
train
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/utils.py#L55-L67
caesar0301/relogger
relogger/syslog.py
HEADER.timestamp
def timestamp(self, value): """ The local time when the message was written. Must follow the format 'Mmm DD HH:MM:SS'. If the day of the month is less than 10, then it MUST be represented as a space and then the number. """ if not self._timestamp_is_valid(value): value = self._calculate_current_timestamp() self._timestamp = value
python
def timestamp(self, value): """ The local time when the message was written. Must follow the format 'Mmm DD HH:MM:SS'. If the day of the month is less than 10, then it MUST be represented as a space and then the number. """ if not self._timestamp_is_valid(value): value = self._calculate_current_timestamp() self._timestamp = value
[ "def", "timestamp", "(", "self", ",", "value", ")", ":", "if", "not", "self", ".", "_timestamp_is_valid", "(", "value", ")", ":", "value", "=", "self", ".", "_calculate_current_timestamp", "(", ")", "self", ".", "_timestamp", "=", "value" ]
The local time when the message was written. Must follow the format 'Mmm DD HH:MM:SS'. If the day of the month is less than 10, then it MUST be represented as a space and then the number.
[ "The", "local", "time", "when", "the", "message", "was", "written", "." ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/syslog.py#L118-L130
caesar0301/relogger
relogger/syslog.py
HEADER.hostname
def hostname(self, value): """ The hostname where the log message was created. Should be the first part of the hostname, or an IP address. Should NOT be set to a fully qualified domain name. """ if value is None: value = socket.gethostname() self._hostname = value
python
def hostname(self, value): """ The hostname where the log message was created. Should be the first part of the hostname, or an IP address. Should NOT be set to a fully qualified domain name. """ if value is None: value = socket.gethostname() self._hostname = value
[ "def", "hostname", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "value", "=", "socket", ".", "gethostname", "(", ")", "self", ".", "_hostname", "=", "value" ]
The hostname where the log message was created. Should be the first part of the hostname, or an IP address. Should NOT be set to a fully qualified domain name.
[ "The", "hostname", "where", "the", "log", "message", "was", "created", "." ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/syslog.py#L153-L164
caesar0301/relogger
relogger/syslog.py
MSG.tag
def tag(self, value): """The name of the program that generated the log message. The tag can only contain alphanumeric characters. If the tag is longer than {MAX_TAG_LEN} characters it will be truncated automatically. """ if value is None: value = sys.argv[0] self._tag = value[:self.MAX_TAG_LEN]
python
def tag(self, value): """The name of the program that generated the log message. The tag can only contain alphanumeric characters. If the tag is longer than {MAX_TAG_LEN} characters it will be truncated automatically. """ if value is None: value = sys.argv[0] self._tag = value[:self.MAX_TAG_LEN]
[ "def", "tag", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "value", "=", "sys", ".", "argv", "[", "0", "]", "self", ".", "_tag", "=", "value", "[", ":", "self", ".", "MAX_TAG_LEN", "]" ]
The name of the program that generated the log message. The tag can only contain alphanumeric characters. If the tag is longer than {MAX_TAG_LEN} characters it will be truncated automatically.
[ "The", "name", "of", "the", "program", "that", "generated", "the", "log", "message", "." ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/syslog.py#L207-L217
caesar0301/relogger
relogger/syslog.py
MSG.content
def content(self, value): """The main component of the log message. The content field is a freeform field that often begins with the process ID (pid) of the program that created the message. """ value = self._prepend_seperator(value) self._content = value
python
def content(self, value): """The main component of the log message. The content field is a freeform field that often begins with the process ID (pid) of the program that created the message. """ value = self._prepend_seperator(value) self._content = value
[ "def", "content", "(", "self", ",", "value", ")", ":", "value", "=", "self", ".", "_prepend_seperator", "(", "value", ")", "self", ".", "_content", "=", "value" ]
The main component of the log message. The content field is a freeform field that often begins with the process ID (pid) of the program that created the message.
[ "The", "main", "component", "of", "the", "log", "message", "." ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/syslog.py#L224-L233
caesar0301/relogger
relogger/syslog.py
Syslog.log
def log(self, facility, level, text, pid=False): """Send the message text to all registered hosts. The facility and level will be used to create the packet's PRI part. The HEADER will be automatically determined from the current time and hostname. The MSG will be set from the running program's name and the text parameter. This is the simplest way to use reSyslog.Syslog, creating log messages containing the current time, hostname, program name, etc. This is how you do it:: logger = syslog.Syslog() logger.add_host("localhost") logger.log(Facility.USER, Level.INFO, "Hello World") If pid is True the process ID will be prepended to the text parameter, enclosed in square brackets and followed by a colon. """ pri = PRI(facility, level) header = HEADER() if pid: msg = MSG(content=text, pid=os.getpid()) else: msg = MSG(content=text) packet = Packet(pri, header, msg) self._send_packet_to_hosts(packet)
python
def log(self, facility, level, text, pid=False): """Send the message text to all registered hosts. The facility and level will be used to create the packet's PRI part. The HEADER will be automatically determined from the current time and hostname. The MSG will be set from the running program's name and the text parameter. This is the simplest way to use reSyslog.Syslog, creating log messages containing the current time, hostname, program name, etc. This is how you do it:: logger = syslog.Syslog() logger.add_host("localhost") logger.log(Facility.USER, Level.INFO, "Hello World") If pid is True the process ID will be prepended to the text parameter, enclosed in square brackets and followed by a colon. """ pri = PRI(facility, level) header = HEADER() if pid: msg = MSG(content=text, pid=os.getpid()) else: msg = MSG(content=text) packet = Packet(pri, header, msg) self._send_packet_to_hosts(packet)
[ "def", "log", "(", "self", ",", "facility", ",", "level", ",", "text", ",", "pid", "=", "False", ")", ":", "pri", "=", "PRI", "(", "facility", ",", "level", ")", "header", "=", "HEADER", "(", ")", "if", "pid", ":", "msg", "=", "MSG", "(", "cont...
Send the message text to all registered hosts. The facility and level will be used to create the packet's PRI part. The HEADER will be automatically determined from the current time and hostname. The MSG will be set from the running program's name and the text parameter. This is the simplest way to use reSyslog.Syslog, creating log messages containing the current time, hostname, program name, etc. This is how you do it:: logger = syslog.Syslog() logger.add_host("localhost") logger.log(Facility.USER, Level.INFO, "Hello World") If pid is True the process ID will be prepended to the text parameter, enclosed in square brackets and followed by a colon.
[ "Send", "the", "message", "text", "to", "all", "registered", "hosts", "." ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/syslog.py#L314-L342
patarapolw/memorable-password
memorable_password/password.py
GeneratePassword.refresh
def refresh(self, count_common=4, min_common=1000, timeout=20): """ Generate a new sentence :param int count_common: the number of words with minimal commonness :param int min_common: the minimal commonness based on Google common word list :param float timeout: time in seconds to timeout :return list of str: return tokens on success >>> GeneratePassword().refresh() ['The', 'men', 'in', 'power', 'are', 'committed', 'in', 'principle', 'to', 'modernization', ',', 'but', 'economic', 'and', 'social', 'changes', 'are', 'proceeding', 'only', 'erratically', '.'] """ start = time() while time() - start < timeout: tokens = [token for token, pos in self.brown.get_tagged_sent()] current_count = 0 for word, commonness in self.sentence_tool.rate(tokens): if commonness > min_common: current_count += 1 if current_count >= count_common: self.tokens = tokens return self.tokens raise TimeoutError
python
def refresh(self, count_common=4, min_common=1000, timeout=20): """ Generate a new sentence :param int count_common: the number of words with minimal commonness :param int min_common: the minimal commonness based on Google common word list :param float timeout: time in seconds to timeout :return list of str: return tokens on success >>> GeneratePassword().refresh() ['The', 'men', 'in', 'power', 'are', 'committed', 'in', 'principle', 'to', 'modernization', ',', 'but', 'economic', 'and', 'social', 'changes', 'are', 'proceeding', 'only', 'erratically', '.'] """ start = time() while time() - start < timeout: tokens = [token for token, pos in self.brown.get_tagged_sent()] current_count = 0 for word, commonness in self.sentence_tool.rate(tokens): if commonness > min_common: current_count += 1 if current_count >= count_common: self.tokens = tokens return self.tokens raise TimeoutError
[ "def", "refresh", "(", "self", ",", "count_common", "=", "4", ",", "min_common", "=", "1000", ",", "timeout", "=", "20", ")", ":", "start", "=", "time", "(", ")", "while", "time", "(", ")", "-", "start", "<", "timeout", ":", "tokens", "=", "[", "...
Generate a new sentence :param int count_common: the number of words with minimal commonness :param int min_common: the minimal commonness based on Google common word list :param float timeout: time in seconds to timeout :return list of str: return tokens on success >>> GeneratePassword().refresh() ['The', 'men', 'in', 'power', 'are', 'committed', 'in', 'principle', 'to', 'modernization', ',', 'but', 'economic', 'and', 'social', 'changes', 'are', 'proceeding', 'only', 'erratically', '.']
[ "Generate", "a", "new", "sentence", ":", "param", "int", "count_common", ":", "the", "number", "of", "words", "with", "minimal", "commonness", ":", "param", "int", "min_common", ":", "the", "minimal", "commonness", "based", "on", "Google", "common", "word", ...
train
https://github.com/patarapolw/memorable-password/blob/f53a2afa4104238e1770dfd4d85710bc00719302/memorable_password/password.py#L30-L52
patarapolw/memorable-password
memorable_password/password.py
GeneratePassword.new_common_diceware_password
def new_common_diceware_password(self, number_of_words=6, hint=''): """ Return a suggested password :param int number_of_words: number of words generated :param str hint: :return tuple: a suggested password and a sentence >>> GeneratePassword().new_common_diceware_password() ('rive2sidelauraarchitectss!mplytheOreticalassessMeNt$', [('Mynheer', False), (',', False), ('Sir', False), ('Francis', False), (',', False), ('the', False), ('riverside', True), ('laura', True), (',', False), ('the', False), ('very', False), ('architects', True), ('of', False), ('the', False), ('river', False), ('on', False), ('his', False), ('right', False), ('purling', False), ('simply', True), ('to', False), ('the', False), ('bay', False), ('past', False), ('fish', False), ('weirs', False), ('and', False), ('rocks', False), (',', False), ('and', False), ('ahead', False), ('the', False), ('theoretical', True), ('assessments', True)]) """ keywords = [self.wordlist.get_random_word() for _ in range(number_of_words)] password = self.conformizer.conformize(keywords) if hint: keywords = [hint] + keywords return password, self.to_sentence.from_keywords(keywords)
python
def new_common_diceware_password(self, number_of_words=6, hint=''): """ Return a suggested password :param int number_of_words: number of words generated :param str hint: :return tuple: a suggested password and a sentence >>> GeneratePassword().new_common_diceware_password() ('rive2sidelauraarchitectss!mplytheOreticalassessMeNt$', [('Mynheer', False), (',', False), ('Sir', False), ('Francis', False), (',', False), ('the', False), ('riverside', True), ('laura', True), (',', False), ('the', False), ('very', False), ('architects', True), ('of', False), ('the', False), ('river', False), ('on', False), ('his', False), ('right', False), ('purling', False), ('simply', True), ('to', False), ('the', False), ('bay', False), ('past', False), ('fish', False), ('weirs', False), ('and', False), ('rocks', False), (',', False), ('and', False), ('ahead', False), ('the', False), ('theoretical', True), ('assessments', True)]) """ keywords = [self.wordlist.get_random_word() for _ in range(number_of_words)] password = self.conformizer.conformize(keywords) if hint: keywords = [hint] + keywords return password, self.to_sentence.from_keywords(keywords)
[ "def", "new_common_diceware_password", "(", "self", ",", "number_of_words", "=", "6", ",", "hint", "=", "''", ")", ":", "keywords", "=", "[", "self", ".", "wordlist", ".", "get_random_word", "(", ")", "for", "_", "in", "range", "(", "number_of_words", ")",...
Return a suggested password :param int number_of_words: number of words generated :param str hint: :return tuple: a suggested password and a sentence >>> GeneratePassword().new_common_diceware_password() ('rive2sidelauraarchitectss!mplytheOreticalassessMeNt$', [('Mynheer', False), (',', False), ('Sir', False), ('Francis', False), (',', False), ('the', False), ('riverside', True), ('laura', True), (',', False), ('the', False), ('very', False), ('architects', True), ('of', False), ('the', False), ('river', False), ('on', False), ('his', False), ('right', False), ('purling', False), ('simply', True), ('to', False), ('the', False), ('bay', False), ('past', False), ('fish', False), ('weirs', False), ('and', False), ('rocks', False), (',', False), ('and', False), ('ahead', False), ('the', False), ('theoretical', True), ('assessments', True)])
[ "Return", "a", "suggested", "password", ":", "param", "int", "number_of_words", ":", "number", "of", "words", "generated", ":", "param", "str", "hint", ":", ":", "return", "tuple", ":", "a", "suggested", "password", "and", "a", "sentence" ]
train
https://github.com/patarapolw/memorable-password/blob/f53a2afa4104238e1770dfd4d85710bc00719302/memorable_password/password.py#L54-L68
patarapolw/memorable-password
memorable_password/password.py
GeneratePassword.new_pin
def new_pin(self, min_length=4, min_common=1000, timeout=20, refresh_timeout=3): """ Return a suggested PIN :param int min_length: minimum length of the PIN generated :param int min_common: the minimal commonness to be considered convertible to a PIN :param float timeout: main timeout in seconds :param float refresh_timeout: timeout to new sentence :return str: a string of digits >>> GeneratePassword().new_pin() ('32700', [('His', False), ('mouth', True), ('was', False), ('open', False), (',', False), ('his', False), ('neck', True), ('corded', True), ('with', False), ('the', False), ('strain', True), ('of', False), ('his', False), ('screams', True)]) """ self.refresh(count_common=min_length, min_common=min_common, timeout=refresh_timeout) rating = self.sentence_tool.rate(self.tokens) start = time() while time() - start < timeout: pin = '' for token, commonness in rating: if commonness >= min_common: key = self.mnemonic.word_to_key('major_system', token.lower()) if key is not None: pin += key if len(pin) < min_length: self.refresh(count_common=min_length, min_common=min_common, timeout=refresh_timeout) rating = self.sentence_tool.rate(self.tokens) else: return pin, list(self.overlap_pin(pin, self.tokens)) return None
python
def new_pin(self, min_length=4, min_common=1000, timeout=20, refresh_timeout=3): """ Return a suggested PIN :param int min_length: minimum length of the PIN generated :param int min_common: the minimal commonness to be considered convertible to a PIN :param float timeout: main timeout in seconds :param float refresh_timeout: timeout to new sentence :return str: a string of digits >>> GeneratePassword().new_pin() ('32700', [('His', False), ('mouth', True), ('was', False), ('open', False), (',', False), ('his', False), ('neck', True), ('corded', True), ('with', False), ('the', False), ('strain', True), ('of', False), ('his', False), ('screams', True)]) """ self.refresh(count_common=min_length, min_common=min_common, timeout=refresh_timeout) rating = self.sentence_tool.rate(self.tokens) start = time() while time() - start < timeout: pin = '' for token, commonness in rating: if commonness >= min_common: key = self.mnemonic.word_to_key('major_system', token.lower()) if key is not None: pin += key if len(pin) < min_length: self.refresh(count_common=min_length, min_common=min_common, timeout=refresh_timeout) rating = self.sentence_tool.rate(self.tokens) else: return pin, list(self.overlap_pin(pin, self.tokens)) return None
[ "def", "new_pin", "(", "self", ",", "min_length", "=", "4", ",", "min_common", "=", "1000", ",", "timeout", "=", "20", ",", "refresh_timeout", "=", "3", ")", ":", "self", ".", "refresh", "(", "count_common", "=", "min_length", ",", "min_common", "=", "...
Return a suggested PIN :param int min_length: minimum length of the PIN generated :param int min_common: the minimal commonness to be considered convertible to a PIN :param float timeout: main timeout in seconds :param float refresh_timeout: timeout to new sentence :return str: a string of digits >>> GeneratePassword().new_pin() ('32700', [('His', False), ('mouth', True), ('was', False), ('open', False), (',', False), ('his', False), ('neck', True), ('corded', True), ('with', False), ('the', False), ('strain', True), ('of', False), ('his', False), ('screams', True)])
[ "Return", "a", "suggested", "PIN" ]
train
https://github.com/patarapolw/memorable-password/blob/f53a2afa4104238e1770dfd4d85710bc00719302/memorable_password/password.py#L103-L134
wglass/lighthouse
lighthouse/checks/http.py
HTTPCheck.apply_check_config
def apply_check_config(self, config): """ Takes a validated config dictionary and sets the `uri`, `use_https` and `method` attributes based on the config's contents. """ self.uri = config["uri"] self.use_https = config.get("https", False) self.method = config.get("method", "GET")
python
def apply_check_config(self, config): """ Takes a validated config dictionary and sets the `uri`, `use_https` and `method` attributes based on the config's contents. """ self.uri = config["uri"] self.use_https = config.get("https", False) self.method = config.get("method", "GET")
[ "def", "apply_check_config", "(", "self", ",", "config", ")", ":", "self", ".", "uri", "=", "config", "[", "\"uri\"", "]", "self", ".", "use_https", "=", "config", ".", "get", "(", "\"https\"", ",", "False", ")", "self", ".", "method", "=", "config", ...
Takes a validated config dictionary and sets the `uri`, `use_https` and `method` attributes based on the config's contents.
[ "Takes", "a", "validated", "config", "dictionary", "and", "sets", "the", "uri", "use_https", "and", "method", "attributes", "based", "on", "the", "config", "s", "contents", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/checks/http.py#L43-L50
wglass/lighthouse
lighthouse/checks/http.py
HTTPCheck.perform
def perform(self): """ Performs a simple HTTP request against the configured url and returns true if the response has a 2xx code. The url can be configured to use https via the "https" boolean flag in the config, as well as a custom HTTP method via the "method" key. The default is to not use https and the GET method. """ if self.use_https: conn = client.HTTPSConnection(self.host, self.port) else: conn = client.HTTPConnection(self.host, self.port) conn.request(self.method, self.uri) response = conn.getresponse() conn.close() return bool(response.status >= 200 and response.status < 300)
python
def perform(self): """ Performs a simple HTTP request against the configured url and returns true if the response has a 2xx code. The url can be configured to use https via the "https" boolean flag in the config, as well as a custom HTTP method via the "method" key. The default is to not use https and the GET method. """ if self.use_https: conn = client.HTTPSConnection(self.host, self.port) else: conn = client.HTTPConnection(self.host, self.port) conn.request(self.method, self.uri) response = conn.getresponse() conn.close() return bool(response.status >= 200 and response.status < 300)
[ "def", "perform", "(", "self", ")", ":", "if", "self", ".", "use_https", ":", "conn", "=", "client", ".", "HTTPSConnection", "(", "self", ".", "host", ",", "self", ".", "port", ")", "else", ":", "conn", "=", "client", ".", "HTTPConnection", "(", "sel...
Performs a simple HTTP request against the configured url and returns true if the response has a 2xx code. The url can be configured to use https via the "https" boolean flag in the config, as well as a custom HTTP method via the "method" key. The default is to not use https and the GET method.
[ "Performs", "a", "simple", "HTTP", "request", "against", "the", "configured", "url", "and", "returns", "true", "if", "the", "response", "has", "a", "2xx", "code", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/checks/http.py#L52-L73
wglass/lighthouse
lighthouse/writer.py
Writer.sync_balancer_files
def sync_balancer_files(self): """ Syncs the config files for each present Balancer instance. Submits the work to sync each file as a work pool job. """ def sync(): for balancer in self.configurables[Balancer].values(): balancer.sync_file(self.configurables[Cluster].values()) self.work_pool.submit(sync)
python
def sync_balancer_files(self): """ Syncs the config files for each present Balancer instance. Submits the work to sync each file as a work pool job. """ def sync(): for balancer in self.configurables[Balancer].values(): balancer.sync_file(self.configurables[Cluster].values()) self.work_pool.submit(sync)
[ "def", "sync_balancer_files", "(", "self", ")", ":", "def", "sync", "(", ")", ":", "for", "balancer", "in", "self", ".", "configurables", "[", "Balancer", "]", ".", "values", "(", ")", ":", "balancer", ".", "sync_file", "(", "self", ".", "configurables",...
Syncs the config files for each present Balancer instance. Submits the work to sync each file as a work pool job.
[ "Syncs", "the", "config", "files", "for", "each", "present", "Balancer", "instance", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/writer.py#L28-L39
wglass/lighthouse
lighthouse/writer.py
Writer.on_balancer_remove
def on_balancer_remove(self, name): """ The removal of a load balancer config isn't supported just yet. If the balancer being removed is the only configured one we fire a critical log message saying so. A writer setup with no balancers is less than useless. """ if len(self.configurables[Balancer]) == 1: logger.critical( "'%s' config file removed! It was the only balancer left!", name )
python
def on_balancer_remove(self, name): """ The removal of a load balancer config isn't supported just yet. If the balancer being removed is the only configured one we fire a critical log message saying so. A writer setup with no balancers is less than useless. """ if len(self.configurables[Balancer]) == 1: logger.critical( "'%s' config file removed! It was the only balancer left!", name )
[ "def", "on_balancer_remove", "(", "self", ",", "name", ")", ":", "if", "len", "(", "self", ".", "configurables", "[", "Balancer", "]", ")", "==", "1", ":", "logger", ".", "critical", "(", "\"'%s' config file removed! It was the only balancer left!\"", ",", "name...
The removal of a load balancer config isn't supported just yet. If the balancer being removed is the only configured one we fire a critical log message saying so. A writer setup with no balancers is less than useless.
[ "The", "removal", "of", "a", "load", "balancer", "config", "isn", "t", "supported", "just", "yet", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/writer.py#L53-L65
wglass/lighthouse
lighthouse/writer.py
Writer.on_discovery_add
def on_discovery_add(self, discovery): """ When a discovery is added we call `connect()` on it and launch a thread for each cluster where the discovery watches for changes to the cluster's nodes. """ discovery.connect() for cluster in self.configurables[Cluster].values(): if cluster.discovery != discovery.name: continue self.launch_thread( cluster.name, discovery.start_watching, cluster, self.sync_balancer_files ) self.sync_balancer_files()
python
def on_discovery_add(self, discovery): """ When a discovery is added we call `connect()` on it and launch a thread for each cluster where the discovery watches for changes to the cluster's nodes. """ discovery.connect() for cluster in self.configurables[Cluster].values(): if cluster.discovery != discovery.name: continue self.launch_thread( cluster.name, discovery.start_watching, cluster, self.sync_balancer_files ) self.sync_balancer_files()
[ "def", "on_discovery_add", "(", "self", ",", "discovery", ")", ":", "discovery", ".", "connect", "(", ")", "for", "cluster", "in", "self", ".", "configurables", "[", "Cluster", "]", ".", "values", "(", ")", ":", "if", "cluster", ".", "discovery", "!=", ...
When a discovery is added we call `connect()` on it and launch a thread for each cluster where the discovery watches for changes to the cluster's nodes.
[ "When", "a", "discovery", "is", "added", "we", "call", "connect", "()", "on", "it", "and", "launch", "a", "thread", "for", "each", "cluster", "where", "the", "discovery", "watches", "for", "changes", "to", "the", "cluster", "s", "nodes", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/writer.py#L67-L84
wglass/lighthouse
lighthouse/writer.py
Writer.on_discovery_remove
def on_discovery_remove(self, name): """ When a Discovery is removed we must make sure to call its `stop()` method to close any connections or do any clean up. """ self.configurables[Discovery][name].stop() self.sync_balancer_files()
python
def on_discovery_remove(self, name): """ When a Discovery is removed we must make sure to call its `stop()` method to close any connections or do any clean up. """ self.configurables[Discovery][name].stop() self.sync_balancer_files()
[ "def", "on_discovery_remove", "(", "self", ",", "name", ")", ":", "self", ".", "configurables", "[", "Discovery", "]", "[", "name", "]", ".", "stop", "(", ")", "self", ".", "sync_balancer_files", "(", ")" ]
When a Discovery is removed we must make sure to call its `stop()` method to close any connections or do any clean up.
[ "When", "a", "Discovery", "is", "removed", "we", "must", "make", "sure", "to", "call", "its", "stop", "()", "method", "to", "close", "any", "connections", "or", "do", "any", "clean", "up", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/writer.py#L86-L93
wglass/lighthouse
lighthouse/writer.py
Writer.on_cluster_add
def on_cluster_add(self, cluster): """ Once a cluster is added we tell its associated discovery method to start watching for changes to the cluster's child nodes (if the discovery method is configured and available). """ if cluster.discovery not in self.configurables[Discovery]: return discovery = self.configurables[Discovery][cluster.discovery] self.launch_thread( cluster.name, discovery.start_watching, cluster, self.sync_balancer_files )
python
def on_cluster_add(self, cluster): """ Once a cluster is added we tell its associated discovery method to start watching for changes to the cluster's child nodes (if the discovery method is configured and available). """ if cluster.discovery not in self.configurables[Discovery]: return discovery = self.configurables[Discovery][cluster.discovery] self.launch_thread( cluster.name, discovery.start_watching, cluster, self.sync_balancer_files )
[ "def", "on_cluster_add", "(", "self", ",", "cluster", ")", ":", "if", "cluster", ".", "discovery", "not", "in", "self", ".", "configurables", "[", "Discovery", "]", ":", "return", "discovery", "=", "self", ".", "configurables", "[", "Discovery", "]", "[", ...
Once a cluster is added we tell its associated discovery method to start watching for changes to the cluster's child nodes (if the discovery method is configured and available).
[ "Once", "a", "cluster", "is", "added", "we", "tell", "its", "associated", "discovery", "method", "to", "start", "watching", "for", "changes", "to", "the", "cluster", "s", "child", "nodes", "(", "if", "the", "discovery", "method", "is", "configured", "and", ...
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/writer.py#L95-L109
wglass/lighthouse
lighthouse/writer.py
Writer.on_cluster_update
def on_cluster_update(self, name, new_config): """ Callback hook for when a cluster is updated. Or main concern when a cluster is updated is whether or not the associated discovery method changed. If it did, we make sure that the old discovery method stops watching for the cluster's changes (if the old method is around) and that the new method *starts* watching for the cluster's changes (if the new method is actually around). Regardless of how the discovery method shuffling plays out the `sync_balancer_files` method is called. """ cluster = self.configurables[Cluster][name] old_discovery = cluster.discovery new_discovery = new_config["discovery"] if old_discovery == new_discovery: self.sync_balancer_files() return logger.info( "Switching '%s' cluster discovery from '%s' to '%s'", name, old_discovery, new_discovery ) if old_discovery in self.configurables[Discovery]: self.configurables[Discovery][old_discovery].stop_watching( cluster ) self.kill_thread(cluster.name) if new_discovery not in self.configurables[Discovery]: logger.warn( "New discovery '%s' for cluster '%s' is unknown/unavailable.", new_discovery, name ) self.sync_balancer_files() return discovery = self.configurables[Discovery][new_discovery] self.launch_thread( cluster.name, discovery.start_watching, cluster, self.sync_balancer_files )
python
def on_cluster_update(self, name, new_config): """ Callback hook for when a cluster is updated. Or main concern when a cluster is updated is whether or not the associated discovery method changed. If it did, we make sure that the old discovery method stops watching for the cluster's changes (if the old method is around) and that the new method *starts* watching for the cluster's changes (if the new method is actually around). Regardless of how the discovery method shuffling plays out the `sync_balancer_files` method is called. """ cluster = self.configurables[Cluster][name] old_discovery = cluster.discovery new_discovery = new_config["discovery"] if old_discovery == new_discovery: self.sync_balancer_files() return logger.info( "Switching '%s' cluster discovery from '%s' to '%s'", name, old_discovery, new_discovery ) if old_discovery in self.configurables[Discovery]: self.configurables[Discovery][old_discovery].stop_watching( cluster ) self.kill_thread(cluster.name) if new_discovery not in self.configurables[Discovery]: logger.warn( "New discovery '%s' for cluster '%s' is unknown/unavailable.", new_discovery, name ) self.sync_balancer_files() return discovery = self.configurables[Discovery][new_discovery] self.launch_thread( cluster.name, discovery.start_watching, cluster, self.sync_balancer_files )
[ "def", "on_cluster_update", "(", "self", ",", "name", ",", "new_config", ")", ":", "cluster", "=", "self", ".", "configurables", "[", "Cluster", "]", "[", "name", "]", "old_discovery", "=", "cluster", ".", "discovery", "new_discovery", "=", "new_config", "["...
Callback hook for when a cluster is updated. Or main concern when a cluster is updated is whether or not the associated discovery method changed. If it did, we make sure that the old discovery method stops watching for the cluster's changes (if the old method is around) and that the new method *starts* watching for the cluster's changes (if the new method is actually around). Regardless of how the discovery method shuffling plays out the `sync_balancer_files` method is called.
[ "Callback", "hook", "for", "when", "a", "cluster", "is", "updated", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/writer.py#L111-L154
wglass/lighthouse
lighthouse/writer.py
Writer.on_cluster_remove
def on_cluster_remove(self, name): """ Stops the cluster's associated discovery method from watching for changes to the cluster's nodes. """ discovery_name = self.configurables[Cluster][name].discovery if discovery_name in self.configurables[Discovery]: self.configurables[Discovery][discovery_name].stop_watching( self.configurables[Cluster][name] ) self.kill_thread(name) self.sync_balancer_files()
python
def on_cluster_remove(self, name): """ Stops the cluster's associated discovery method from watching for changes to the cluster's nodes. """ discovery_name = self.configurables[Cluster][name].discovery if discovery_name in self.configurables[Discovery]: self.configurables[Discovery][discovery_name].stop_watching( self.configurables[Cluster][name] ) self.kill_thread(name) self.sync_balancer_files()
[ "def", "on_cluster_remove", "(", "self", ",", "name", ")", ":", "discovery_name", "=", "self", ".", "configurables", "[", "Cluster", "]", "[", "name", "]", ".", "discovery", "if", "discovery_name", "in", "self", ".", "configurables", "[", "Discovery", "]", ...
Stops the cluster's associated discovery method from watching for changes to the cluster's nodes.
[ "Stops", "the", "cluster", "s", "associated", "discovery", "method", "from", "watching", "for", "changes", "to", "the", "cluster", "s", "nodes", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/writer.py#L156-L168
gbiggs/rtctree
rtctree/path.py
parse_path
def parse_path(path): '''Parses an address into directory and port parts. The last segment of the address will be checked to see if it matches a port specification (i.e. contains a colon followed by text). This will be returned separately from the directory parts. If a leading / is given, that will be returned as the first directory component. All other / characters are removed. All leading / characters are condensed into a single leading /. Any path components that are . will be removed, as they just point to the previous path component. For example, '/localhost/.' will become '/localhost'. Any path components that are .. will be removed, along with the previous path component. If this renders the path empty, it will be replaced with '/'. Examples: >>> parse_path('localhost:30000/manager/comp0.rtc') (['localhost:30000', 'manager', 'comp0.rtc'], None) >>> parse_path('localhost/manager/comp0.rtc:in') (['localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('/localhost/manager/comp0.rtc') (['/', 'localhost', 'manager', 'comp0.rtc'], None) >>> parse_path('/localhost/manager/comp0.rtc:in') (['/', 'localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('manager/comp0.rtc') (['manager', 'comp0.rtc'], None) >>> parse_path('comp0.rtc') (['comp0.rtc'], None) ''' bits = path.lstrip('/').split('/') if not bits: raise exceptions.BadPathError(path) if bits[-1]: bits[-1], port = get_port(bits[-1]) else: port = None if path[0] == '/': bits = ['/'] + bits condensed_bits = [] for bit in bits: if bit == '.': continue if bit == '..': condensed_bits = condensed_bits[:-1] continue condensed_bits.append(bit) if not condensed_bits: condensed_bits = ['/'] return condensed_bits, port
python
def parse_path(path): '''Parses an address into directory and port parts. The last segment of the address will be checked to see if it matches a port specification (i.e. contains a colon followed by text). This will be returned separately from the directory parts. If a leading / is given, that will be returned as the first directory component. All other / characters are removed. All leading / characters are condensed into a single leading /. Any path components that are . will be removed, as they just point to the previous path component. For example, '/localhost/.' will become '/localhost'. Any path components that are .. will be removed, along with the previous path component. If this renders the path empty, it will be replaced with '/'. Examples: >>> parse_path('localhost:30000/manager/comp0.rtc') (['localhost:30000', 'manager', 'comp0.rtc'], None) >>> parse_path('localhost/manager/comp0.rtc:in') (['localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('/localhost/manager/comp0.rtc') (['/', 'localhost', 'manager', 'comp0.rtc'], None) >>> parse_path('/localhost/manager/comp0.rtc:in') (['/', 'localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('manager/comp0.rtc') (['manager', 'comp0.rtc'], None) >>> parse_path('comp0.rtc') (['comp0.rtc'], None) ''' bits = path.lstrip('/').split('/') if not bits: raise exceptions.BadPathError(path) if bits[-1]: bits[-1], port = get_port(bits[-1]) else: port = None if path[0] == '/': bits = ['/'] + bits condensed_bits = [] for bit in bits: if bit == '.': continue if bit == '..': condensed_bits = condensed_bits[:-1] continue condensed_bits.append(bit) if not condensed_bits: condensed_bits = ['/'] return condensed_bits, port
[ "def", "parse_path", "(", "path", ")", ":", "bits", "=", "path", ".", "lstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "if", "not", "bits", ":", "raise", "exceptions", ".", "BadPathError", "(", "path", ")", "if", "bits", "[", "-", "1", "...
Parses an address into directory and port parts. The last segment of the address will be checked to see if it matches a port specification (i.e. contains a colon followed by text). This will be returned separately from the directory parts. If a leading / is given, that will be returned as the first directory component. All other / characters are removed. All leading / characters are condensed into a single leading /. Any path components that are . will be removed, as they just point to the previous path component. For example, '/localhost/.' will become '/localhost'. Any path components that are .. will be removed, along with the previous path component. If this renders the path empty, it will be replaced with '/'. Examples: >>> parse_path('localhost:30000/manager/comp0.rtc') (['localhost:30000', 'manager', 'comp0.rtc'], None) >>> parse_path('localhost/manager/comp0.rtc:in') (['localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('/localhost/manager/comp0.rtc') (['/', 'localhost', 'manager', 'comp0.rtc'], None) >>> parse_path('/localhost/manager/comp0.rtc:in') (['/', 'localhost', 'manager', 'comp0.rtc'], 'in') >>> parse_path('manager/comp0.rtc') (['manager', 'comp0.rtc'], None) >>> parse_path('comp0.rtc') (['comp0.rtc'], None)
[ "Parses", "an", "address", "into", "directory", "and", "port", "parts", ".", "The", "last", "segment", "of", "the", "address", "will", "be", "checked", "to", "see", "if", "it", "matches", "a", "port", "specification", "(", "i", ".", "e", ".", "contains",...
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/path.py#L28-L87
gbiggs/rtctree
rtctree/path.py
format_path
def format_path(path): '''Formats a path as a string, placing / between each component. @param path A path in rtctree format, as a tuple with the port name as the second component. Examples: >>> format_path((['localhost:30000', 'manager', 'comp0.rtc'], None)) 'localhost:30000/manager/comp0.rtc' >>> format_path((['localhost', 'manager', 'comp0.rtc'], 'in')) 'localhost/manager/comp0.rtc:in' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], None)) '/localhost/manager/comp0.rtc' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], 'in')) '/localhost/manager/comp0.rtc:in' >>> format_path((['manager', 'comp0.rtc'], None)) 'manager/comp0.rtc' >>> format_path((['comp0.rtc'], None)) 'comp0.rtc' ''' if path[1]: port = ':' + path[1] else: port = '' if type(path[0]) is str: # Don't add slashes if the path is singular return path[0] + port if path[0][0] == '/': starter = '/' path = path[0][1:] else: starter = '' path = path[0] return starter + '/'.join(path) + port
python
def format_path(path): '''Formats a path as a string, placing / between each component. @param path A path in rtctree format, as a tuple with the port name as the second component. Examples: >>> format_path((['localhost:30000', 'manager', 'comp0.rtc'], None)) 'localhost:30000/manager/comp0.rtc' >>> format_path((['localhost', 'manager', 'comp0.rtc'], 'in')) 'localhost/manager/comp0.rtc:in' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], None)) '/localhost/manager/comp0.rtc' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], 'in')) '/localhost/manager/comp0.rtc:in' >>> format_path((['manager', 'comp0.rtc'], None)) 'manager/comp0.rtc' >>> format_path((['comp0.rtc'], None)) 'comp0.rtc' ''' if path[1]: port = ':' + path[1] else: port = '' if type(path[0]) is str: # Don't add slashes if the path is singular return path[0] + port if path[0][0] == '/': starter = '/' path = path[0][1:] else: starter = '' path = path[0] return starter + '/'.join(path) + port
[ "def", "format_path", "(", "path", ")", ":", "if", "path", "[", "1", "]", ":", "port", "=", "':'", "+", "path", "[", "1", "]", "else", ":", "port", "=", "''", "if", "type", "(", "path", "[", "0", "]", ")", "is", "str", ":", "# Don't add slashes...
Formats a path as a string, placing / between each component. @param path A path in rtctree format, as a tuple with the port name as the second component. Examples: >>> format_path((['localhost:30000', 'manager', 'comp0.rtc'], None)) 'localhost:30000/manager/comp0.rtc' >>> format_path((['localhost', 'manager', 'comp0.rtc'], 'in')) 'localhost/manager/comp0.rtc:in' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], None)) '/localhost/manager/comp0.rtc' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], 'in')) '/localhost/manager/comp0.rtc:in' >>> format_path((['manager', 'comp0.rtc'], None)) 'manager/comp0.rtc' >>> format_path((['comp0.rtc'], None)) 'comp0.rtc'
[ "Formats", "a", "path", "as", "a", "string", "placing", "/", "between", "each", "component", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/path.py#L100-L140
ltalirz/aiida-gudhi
aiida_gudhi/data/rips.py
RipsDistanceMatrixParameters.cmdline_params
def cmdline_params(self, distance_matrix_file_name='distance.matrix', remote_folder_path=None): """Synthesize command line parameters e.g. [ ['--output-file', 'out.barcode'], ['distance_matrix.file']] :param distance_matrix_file_name: Name of distance matrix file :param remote_folder_path: Path to remote folder containing distance matrix file """ parameters = [] pm_dict = self.get_dict() for k, v in pm_dict.iteritems(): parameters += ['--' + k, v] # distance matrix can be provided via remote folder if remote_folder_path is None: parameters += [distance_matrix_file_name] else: parameters += [remote_folder_path + distance_matrix_file_name] return map(str, parameters)
python
def cmdline_params(self, distance_matrix_file_name='distance.matrix', remote_folder_path=None): """Synthesize command line parameters e.g. [ ['--output-file', 'out.barcode'], ['distance_matrix.file']] :param distance_matrix_file_name: Name of distance matrix file :param remote_folder_path: Path to remote folder containing distance matrix file """ parameters = [] pm_dict = self.get_dict() for k, v in pm_dict.iteritems(): parameters += ['--' + k, v] # distance matrix can be provided via remote folder if remote_folder_path is None: parameters += [distance_matrix_file_name] else: parameters += [remote_folder_path + distance_matrix_file_name] return map(str, parameters)
[ "def", "cmdline_params", "(", "self", ",", "distance_matrix_file_name", "=", "'distance.matrix'", ",", "remote_folder_path", "=", "None", ")", ":", "parameters", "=", "[", "]", "pm_dict", "=", "self", ".", "get_dict", "(", ")", "for", "k", ",", "v", "in", ...
Synthesize command line parameters e.g. [ ['--output-file', 'out.barcode'], ['distance_matrix.file']] :param distance_matrix_file_name: Name of distance matrix file :param remote_folder_path: Path to remote folder containing distance matrix file
[ "Synthesize", "command", "line", "parameters" ]
train
https://github.com/ltalirz/aiida-gudhi/blob/81ebec782ddff3ab97a3e3242b809fec989fa4b9/aiida_gudhi/data/rips.py#L46-L69
wglass/lighthouse
lighthouse/log/cli.py
color_string
def color_string(color, string): """ Colorizes a given string, if coloring is available. """ if not color_available: return string return color + string + colorama.Fore.RESET
python
def color_string(color, string): """ Colorizes a given string, if coloring is available. """ if not color_available: return string return color + string + colorama.Fore.RESET
[ "def", "color_string", "(", "color", ",", "string", ")", ":", "if", "not", "color_available", ":", "return", "string", "return", "color", "+", "string", "+", "colorama", ".", "Fore", ".", "RESET" ]
Colorizes a given string, if coloring is available.
[ "Colorizes", "a", "given", "string", "if", "coloring", "is", "available", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/log/cli.py#L11-L18
wglass/lighthouse
lighthouse/log/cli.py
color_for_level
def color_for_level(level): """ Returns the colorama Fore color for a given log level. If color is not available, returns None. """ if not color_available: return None return { logging.DEBUG: colorama.Fore.WHITE, logging.INFO: colorama.Fore.BLUE, logging.WARNING: colorama.Fore.YELLOW, logging.ERROR: colorama.Fore.RED, logging.CRITICAL: colorama.Fore.MAGENTA }.get(level, colorama.Fore.WHITE)
python
def color_for_level(level): """ Returns the colorama Fore color for a given log level. If color is not available, returns None. """ if not color_available: return None return { logging.DEBUG: colorama.Fore.WHITE, logging.INFO: colorama.Fore.BLUE, logging.WARNING: colorama.Fore.YELLOW, logging.ERROR: colorama.Fore.RED, logging.CRITICAL: colorama.Fore.MAGENTA }.get(level, colorama.Fore.WHITE)
[ "def", "color_for_level", "(", "level", ")", ":", "if", "not", "color_available", ":", "return", "None", "return", "{", "logging", ".", "DEBUG", ":", "colorama", ".", "Fore", ".", "WHITE", ",", "logging", ".", "INFO", ":", "colorama", ".", "Fore", ".", ...
Returns the colorama Fore color for a given log level. If color is not available, returns None.
[ "Returns", "the", "colorama", "Fore", "color", "for", "a", "given", "log", "level", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/log/cli.py#L21-L36
wglass/lighthouse
lighthouse/log/cli.py
create_thread_color_cycle
def create_thread_color_cycle(): """ Generates a never-ending cycle of colors to choose from for individual threads. If color is not available, a cycle that repeats None every time is returned instead. """ if not color_available: return itertools.cycle([None]) return itertools.cycle( ( colorama.Fore.CYAN, colorama.Fore.BLUE, colorama.Fore.MAGENTA, colorama.Fore.GREEN, ) )
python
def create_thread_color_cycle(): """ Generates a never-ending cycle of colors to choose from for individual threads. If color is not available, a cycle that repeats None every time is returned instead. """ if not color_available: return itertools.cycle([None]) return itertools.cycle( ( colorama.Fore.CYAN, colorama.Fore.BLUE, colorama.Fore.MAGENTA, colorama.Fore.GREEN, ) )
[ "def", "create_thread_color_cycle", "(", ")", ":", "if", "not", "color_available", ":", "return", "itertools", ".", "cycle", "(", "[", "None", "]", ")", "return", "itertools", ".", "cycle", "(", "(", "colorama", ".", "Fore", ".", "CYAN", ",", "colorama", ...
Generates a never-ending cycle of colors to choose from for individual threads. If color is not available, a cycle that repeats None every time is returned instead.
[ "Generates", "a", "never", "-", "ending", "cycle", "of", "colors", "to", "choose", "from", "for", "individual", "threads", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/log/cli.py#L39-L57
wglass/lighthouse
lighthouse/log/cli.py
color_for_thread
def color_for_thread(thread_id): """ Associates the thread ID with the next color in the `thread_colors` cycle, so that thread-specific parts of a log have a consistent separate color. """ if thread_id not in seen_thread_colors: seen_thread_colors[thread_id] = next(thread_colors) return seen_thread_colors[thread_id]
python
def color_for_thread(thread_id): """ Associates the thread ID with the next color in the `thread_colors` cycle, so that thread-specific parts of a log have a consistent separate color. """ if thread_id not in seen_thread_colors: seen_thread_colors[thread_id] = next(thread_colors) return seen_thread_colors[thread_id]
[ "def", "color_for_thread", "(", "thread_id", ")", ":", "if", "thread_id", "not", "in", "seen_thread_colors", ":", "seen_thread_colors", "[", "thread_id", "]", "=", "next", "(", "thread_colors", ")", "return", "seen_thread_colors", "[", "thread_id", "]" ]
Associates the thread ID with the next color in the `thread_colors` cycle, so that thread-specific parts of a log have a consistent separate color.
[ "Associates", "the", "thread", "ID", "with", "the", "next", "color", "in", "the", "thread_colors", "cycle", "so", "that", "thread", "-", "specific", "parts", "of", "a", "log", "have", "a", "consistent", "separate", "color", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/log/cli.py#L64-L72
wglass/lighthouse
lighthouse/log/cli.py
CLIHandler.format
def format(self, record): """ Formats a given log record to include the timestamp, log level, thread ID and message. Colorized if coloring is available. """ if not self.is_tty: return super(CLIHandler, self).format(record) level_abbrev = record.levelname[0] time_and_level = color_string( color_for_level(record.levelno), "[%(asctime)s " + level_abbrev + "]" ) thread = color_string( color_for_thread(record.thread), "[%(threadName)s]" ) formatter = logging.Formatter( time_and_level + thread + " %(message)s", "%Y-%m-%d %H:%M:%S" ) return formatter.format(record)
python
def format(self, record): """ Formats a given log record to include the timestamp, log level, thread ID and message. Colorized if coloring is available. """ if not self.is_tty: return super(CLIHandler, self).format(record) level_abbrev = record.levelname[0] time_and_level = color_string( color_for_level(record.levelno), "[%(asctime)s " + level_abbrev + "]" ) thread = color_string( color_for_thread(record.thread), "[%(threadName)s]" ) formatter = logging.Formatter( time_and_level + thread + " %(message)s", "%Y-%m-%d %H:%M:%S" ) return formatter.format(record)
[ "def", "format", "(", "self", ",", "record", ")", ":", "if", "not", "self", ".", "is_tty", ":", "return", "super", "(", "CLIHandler", ",", "self", ")", ".", "format", "(", "record", ")", "level_abbrev", "=", "record", ".", "levelname", "[", "0", "]",...
Formats a given log record to include the timestamp, log level, thread ID and message. Colorized if coloring is available.
[ "Formats", "a", "given", "log", "record", "to", "include", "the", "timestamp", "log", "level", "thread", "ID", "and", "message", ".", "Colorized", "if", "coloring", "is", "available", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/log/cli.py#L87-L109
mistio/mist.client
src/mistcommand/helpers/sync.py
add_bare_metal_cloud
def add_bare_metal_cloud(client, cloud, keys): """ Black magic is happening here. All of this wil change when we sanitize our API, however, this works until then """ title = cloud.get('title') provider = cloud.get('provider') key = cloud.get('apikey', "") secret = cloud.get('apisecret', "") tenant_name = cloud.get('tenant_name', "") region = cloud.get('region', "") apiurl = cloud.get('apiurl', "") compute_endpoint = cloud.get('compute_endpoint', None) machine_ip = cloud.get('machine_ip', None) machine_key = cloud.get('machine_key', None) machine_user = cloud.get('machine_user', None) machine_port = cloud.get('machine_port', None) if provider == "bare_metal": machine_ids = cloud['machines'].keys() bare_machine = cloud['machines'][machine_ids[0]] machine_hostname = bare_machine.get('dns_name', None) if not machine_hostname: machine_hostname = bare_machine['public_ips'][0] if not machine_ip: machine_ip = machine_hostname key = machine_hostname machine_name = cloud['machines'][machine_ids[0]]['name'] machine_id = machine_ids[0] keypairs = keys.keys() for i in keypairs: keypair_machines = keys[i]['machines'] if keypair_machines: keypair_machs = keys[i]['machines'] for mach in keypair_machs: if mach[1] == machine_id: machine_key = i break else: pass client.add_cloud(title, provider, key, secret, tenant_name=tenant_name, region=region, apiurl=apiurl, machine_ip=machine_ip, machine_key=machine_key, machine_user=machine_user, compute_endpoint=compute_endpoint, machine_port=machine_port)
python
def add_bare_metal_cloud(client, cloud, keys): """ Black magic is happening here. All of this wil change when we sanitize our API, however, this works until then """ title = cloud.get('title') provider = cloud.get('provider') key = cloud.get('apikey', "") secret = cloud.get('apisecret', "") tenant_name = cloud.get('tenant_name', "") region = cloud.get('region', "") apiurl = cloud.get('apiurl', "") compute_endpoint = cloud.get('compute_endpoint', None) machine_ip = cloud.get('machine_ip', None) machine_key = cloud.get('machine_key', None) machine_user = cloud.get('machine_user', None) machine_port = cloud.get('machine_port', None) if provider == "bare_metal": machine_ids = cloud['machines'].keys() bare_machine = cloud['machines'][machine_ids[0]] machine_hostname = bare_machine.get('dns_name', None) if not machine_hostname: machine_hostname = bare_machine['public_ips'][0] if not machine_ip: machine_ip = machine_hostname key = machine_hostname machine_name = cloud['machines'][machine_ids[0]]['name'] machine_id = machine_ids[0] keypairs = keys.keys() for i in keypairs: keypair_machines = keys[i]['machines'] if keypair_machines: keypair_machs = keys[i]['machines'] for mach in keypair_machs: if mach[1] == machine_id: machine_key = i break else: pass client.add_cloud(title, provider, key, secret, tenant_name=tenant_name, region=region, apiurl=apiurl, machine_ip=machine_ip, machine_key=machine_key, machine_user=machine_user, compute_endpoint=compute_endpoint, machine_port=machine_port)
[ "def", "add_bare_metal_cloud", "(", "client", ",", "cloud", ",", "keys", ")", ":", "title", "=", "cloud", ".", "get", "(", "'title'", ")", "provider", "=", "cloud", ".", "get", "(", "'provider'", ")", "key", "=", "cloud", ".", "get", "(", "'apikey'", ...
Black magic is happening here. All of this wil change when we sanitize our API, however, this works until then
[ "Black", "magic", "is", "happening", "here", ".", "All", "of", "this", "wil", "change", "when", "we", "sanitize", "our", "API", "however", "this", "works", "until", "then" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistcommand/helpers/sync.py#L61-L105
mistio/mist.client
src/mistcommand/helpers/sync.py
associate_keys
def associate_keys(user_dict, client): """ This whole function is black magic, had to however cause of the way we keep key-machine association """ added_keys = user_dict['keypairs'] print ">>>Updating Keys-Machines association" for key in added_keys: machines = added_keys[key]['machines'] if machines: try: for machine in machines: cloud_id = machine[0] machine_id = machine[1] ssh_user = machine[3] ssh_port = machine[-1] key = client.keys[key] cloud = cloud_from_id(client, cloud_id) cloud.update_machines() mach = machine_from_id(cloud, machine_id) public_ips = mach.info.get('public_ips', None) if public_ips: host = public_ips[0] else: host = "" key.associate_to_machine(cloud_id=cloud_id, machine_id=machine_id, host=host, ssh_port=ssh_port, ssh_user=ssh_user) print "associated machine %s" % machine_id except Exception as e: pass client.update_keys() print
python
def associate_keys(user_dict, client): """ This whole function is black magic, had to however cause of the way we keep key-machine association """ added_keys = user_dict['keypairs'] print ">>>Updating Keys-Machines association" for key in added_keys: machines = added_keys[key]['machines'] if machines: try: for machine in machines: cloud_id = machine[0] machine_id = machine[1] ssh_user = machine[3] ssh_port = machine[-1] key = client.keys[key] cloud = cloud_from_id(client, cloud_id) cloud.update_machines() mach = machine_from_id(cloud, machine_id) public_ips = mach.info.get('public_ips', None) if public_ips: host = public_ips[0] else: host = "" key.associate_to_machine(cloud_id=cloud_id, machine_id=machine_id, host=host, ssh_port=ssh_port, ssh_user=ssh_user) print "associated machine %s" % machine_id except Exception as e: pass client.update_keys() print
[ "def", "associate_keys", "(", "user_dict", ",", "client", ")", ":", "added_keys", "=", "user_dict", "[", "'keypairs'", "]", "print", "\">>>Updating Keys-Machines association\"", "for", "key", "in", "added_keys", ":", "machines", "=", "added_keys", "[", "key", "]",...
This whole function is black magic, had to however cause of the way we keep key-machine association
[ "This", "whole", "function", "is", "black", "magic", "had", "to", "however", "cause", "of", "the", "way", "we", "keep", "key", "-", "machine", "association" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistcommand/helpers/sync.py#L150-L182
ucbvislab/radiotool
radiotool/composer/fade.py
Fade.to_array
def to_array(self, channels=2): """Generate the array of volume multipliers for the dynamic""" if self.fade_type == "linear": return np.linspace(self.in_volume, self.out_volume, self.duration * channels)\ .reshape(self.duration, channels) elif self.fade_type == "exponential": if self.in_volume < self.out_volume: return (np.logspace(8, 1, self.duration * channels, base=.5) * ( self.out_volume - self.in_volume) / 0.5 + self.in_volume).reshape(self.duration, channels) else: return (np.logspace(1, 8, self.duration * channels, base=.5 ) * (self.in_volume - self.out_volume) / 0.5 + self.out_volume).reshape(self.duration, channels) elif self.fade_type == "cosine": return
python
def to_array(self, channels=2): """Generate the array of volume multipliers for the dynamic""" if self.fade_type == "linear": return np.linspace(self.in_volume, self.out_volume, self.duration * channels)\ .reshape(self.duration, channels) elif self.fade_type == "exponential": if self.in_volume < self.out_volume: return (np.logspace(8, 1, self.duration * channels, base=.5) * ( self.out_volume - self.in_volume) / 0.5 + self.in_volume).reshape(self.duration, channels) else: return (np.logspace(1, 8, self.duration * channels, base=.5 ) * (self.in_volume - self.out_volume) / 0.5 + self.out_volume).reshape(self.duration, channels) elif self.fade_type == "cosine": return
[ "def", "to_array", "(", "self", ",", "channels", "=", "2", ")", ":", "if", "self", ".", "fade_type", "==", "\"linear\"", ":", "return", "np", ".", "linspace", "(", "self", ".", "in_volume", ",", "self", ".", "out_volume", ",", "self", ".", "duration", ...
Generate the array of volume multipliers for the dynamic
[ "Generate", "the", "array", "of", "volume", "multipliers", "for", "the", "dynamic" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/fade.py#L33-L50
alvarogzp/python-sqlite-framework
sqlite_framework/component/component.py
SqliteStorageComponent.sql
def sql(self, sql: str, *qmark_params, **named_params): """ :deprecated: use self.statement to execute properly-formatted sql statements """ statement = SingleSqlStatement(sql) return self.statement(statement).execute(*qmark_params, **named_params)
python
def sql(self, sql: str, *qmark_params, **named_params): """ :deprecated: use self.statement to execute properly-formatted sql statements """ statement = SingleSqlStatement(sql) return self.statement(statement).execute(*qmark_params, **named_params)
[ "def", "sql", "(", "self", ",", "sql", ":", "str", ",", "*", "qmark_params", ",", "*", "*", "named_params", ")", ":", "statement", "=", "SingleSqlStatement", "(", "sql", ")", "return", "self", ".", "statement", "(", "statement", ")", ".", "execute", "(...
:deprecated: use self.statement to execute properly-formatted sql statements
[ ":", "deprecated", ":", "use", "self", ".", "statement", "to", "execute", "properly", "-", "formatted", "sql", "statements" ]
train
https://github.com/alvarogzp/python-sqlite-framework/blob/29db97a64f95cfe13eb7bae1d00b624b5a37b152/sqlite_framework/component/component.py#L60-L65
alvarogzp/python-sqlite-framework
sqlite_framework/component/component.py
SqliteStorageComponent._sql
def _sql(self, sql: str, params=()): """ :deprecated: use self.sql instead """ statement = SingleSqlStatement(sql) return self.statement(statement).execute_for_params(params).cursor
python
def _sql(self, sql: str, params=()): """ :deprecated: use self.sql instead """ statement = SingleSqlStatement(sql) return self.statement(statement).execute_for_params(params).cursor
[ "def", "_sql", "(", "self", ",", "sql", ":", "str", ",", "params", "=", "(", ")", ")", ":", "statement", "=", "SingleSqlStatement", "(", "sql", ")", "return", "self", ".", "statement", "(", "statement", ")", ".", "execute_for_params", "(", "params", ")...
:deprecated: use self.sql instead
[ ":", "deprecated", ":", "use", "self", ".", "sql", "instead" ]
train
https://github.com/alvarogzp/python-sqlite-framework/blob/29db97a64f95cfe13eb7bae1d00b624b5a37b152/sqlite_framework/component/component.py#L67-L72
davebridges/mousedb
mousedb/timed_mating/models.py
PlugEvents.save
def save(self): """Over-rides the default save function for PlugEvents. If a sacrifice date is set for an object in this model, then Active is set to False.""" if self.SacrificeDate: self.Active = False super(PlugEvents, self).save()
python
def save(self): """Over-rides the default save function for PlugEvents. If a sacrifice date is set for an object in this model, then Active is set to False.""" if self.SacrificeDate: self.Active = False super(PlugEvents, self).save()
[ "def", "save", "(", "self", ")", ":", "if", "self", ".", "SacrificeDate", ":", "self", ".", "Active", "=", "False", "super", "(", "PlugEvents", ",", "self", ")", ".", "save", "(", ")" ]
Over-rides the default save function for PlugEvents. If a sacrifice date is set for an object in this model, then Active is set to False.
[ "Over", "-", "rides", "the", "default", "save", "function", "for", "PlugEvents", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/timed_mating/models.py#L38-L44
darkfeline/animanager
animanager/commands/show.py
command
def command(state, args): """Show anime data.""" args = parser.parse_args(args[1:]) aid = state.results.parse_aid(args.aid, default_key='db') anime = query.select.lookup(state.db, aid, episode_fields=args.episode_fields) complete_string = 'yes' if anime.complete else 'no' print(SHOW_MSG.format( anime.aid, anime.title, anime.type, anime.watched_episodes, anime.episodecount, datets.to_date(anime.startdate) if anime.startdate else 'N/A', datets.to_date(anime.enddate) if anime.enddate else 'N/A', complete_string, )) if anime.regexp: print('Watching regexp: {}'.format(anime.regexp)) if hasattr(anime, 'episodes'): episodes = sorted(anime.episodes, key=lambda x: (x.type, x.number)) print('\n', tabulate( ( ( EpisodeTypes.from_db(state.db).get_epno(episode), episode.title, episode.length, 'yes' if episode.user_watched else '', ) for episode in episodes ), headers=['Number', 'Title', 'min', 'Watched'], ))
python
def command(state, args): """Show anime data.""" args = parser.parse_args(args[1:]) aid = state.results.parse_aid(args.aid, default_key='db') anime = query.select.lookup(state.db, aid, episode_fields=args.episode_fields) complete_string = 'yes' if anime.complete else 'no' print(SHOW_MSG.format( anime.aid, anime.title, anime.type, anime.watched_episodes, anime.episodecount, datets.to_date(anime.startdate) if anime.startdate else 'N/A', datets.to_date(anime.enddate) if anime.enddate else 'N/A', complete_string, )) if anime.regexp: print('Watching regexp: {}'.format(anime.regexp)) if hasattr(anime, 'episodes'): episodes = sorted(anime.episodes, key=lambda x: (x.type, x.number)) print('\n', tabulate( ( ( EpisodeTypes.from_db(state.db).get_epno(episode), episode.title, episode.length, 'yes' if episode.user_watched else '', ) for episode in episodes ), headers=['Number', 'Title', 'min', 'Watched'], ))
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "aid", "=", "state", ".", "results", ".", "parse_aid", "(", "args", ".", "aid", ",", "default_key", "=", "'db'", ...
Show anime data.
[ "Show", "anime", "data", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/show.py#L26-L58
mozilla/parquet2hive
parquet2hive_modules/parquet2hivelib.py
load_prefix
def load_prefix(s3_loc, success_only=None, recent_versions=None, exclude_regex=None, just_sql=False): """Get a bash command which will load every dataset in a bucket at a prefix. For this to work, all datasets must be of the form `s3://$BUCKET_NAME/$PREFIX/$DATASET_NAME/v$VERSION/$PARTITIONS`. Any other formats will be ignored. :param bucket_name :param prefix """ bucket_name, prefix = _get_bucket_and_prefix(s3_loc) datasets = _get_common_prefixes(bucket_name, prefix) bash_cmd = '' for dataset in datasets: dataset = _remove_trailing_backslash(dataset) try: bash_cmd += get_bash_cmd('s3://{}/{}'.format(bucket_name, dataset), success_only=success_only, recent_versions=recent_versions, exclude_regex=exclude_regex, just_sql=just_sql) except Exception as e: sys.stderr.write('Failed to process {}, {}\n'.format(dataset, str(e))) return bash_cmd
python
def load_prefix(s3_loc, success_only=None, recent_versions=None, exclude_regex=None, just_sql=False): """Get a bash command which will load every dataset in a bucket at a prefix. For this to work, all datasets must be of the form `s3://$BUCKET_NAME/$PREFIX/$DATASET_NAME/v$VERSION/$PARTITIONS`. Any other formats will be ignored. :param bucket_name :param prefix """ bucket_name, prefix = _get_bucket_and_prefix(s3_loc) datasets = _get_common_prefixes(bucket_name, prefix) bash_cmd = '' for dataset in datasets: dataset = _remove_trailing_backslash(dataset) try: bash_cmd += get_bash_cmd('s3://{}/{}'.format(bucket_name, dataset), success_only=success_only, recent_versions=recent_versions, exclude_regex=exclude_regex, just_sql=just_sql) except Exception as e: sys.stderr.write('Failed to process {}, {}\n'.format(dataset, str(e))) return bash_cmd
[ "def", "load_prefix", "(", "s3_loc", ",", "success_only", "=", "None", ",", "recent_versions", "=", "None", ",", "exclude_regex", "=", "None", ",", "just_sql", "=", "False", ")", ":", "bucket_name", ",", "prefix", "=", "_get_bucket_and_prefix", "(", "s3_loc", ...
Get a bash command which will load every dataset in a bucket at a prefix. For this to work, all datasets must be of the form `s3://$BUCKET_NAME/$PREFIX/$DATASET_NAME/v$VERSION/$PARTITIONS`. Any other formats will be ignored. :param bucket_name :param prefix
[ "Get", "a", "bash", "command", "which", "will", "load", "every", "dataset", "in", "a", "bucket", "at", "a", "prefix", "." ]
train
https://github.com/mozilla/parquet2hive/blob/34b33b36294c011ff187893e8412e203fb1ccf0e/parquet2hive_modules/parquet2hivelib.py#L42-L63
peterldowns/python-mustache
mustache/loading.py
read_unicode
def read_unicode(path, encoding, encoding_errors): """ Return the contents of a file as a unicode string. """ try: f = open(path, 'rb') return make_unicode(f.read(), encoding, encoding_errors) finally: f.close()
python
def read_unicode(path, encoding, encoding_errors): """ Return the contents of a file as a unicode string. """ try: f = open(path, 'rb') return make_unicode(f.read(), encoding, encoding_errors) finally: f.close()
[ "def", "read_unicode", "(", "path", ",", "encoding", ",", "encoding_errors", ")", ":", "try", ":", "f", "=", "open", "(", "path", ",", "'rb'", ")", "return", "make_unicode", "(", "f", ".", "read", "(", ")", ",", "encoding", ",", "encoding_errors", ")",...
Return the contents of a file as a unicode string.
[ "Return", "the", "contents", "of", "a", "file", "as", "a", "unicode", "string", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/loading.py#L18-L24
peterldowns/python-mustache
mustache/loading.py
get_abs_template_path
def get_abs_template_path(template_name, directory, extension): """ Given a template name, a directory, and an extension, return the absolute path to the template. """ # Get the relative path relative_path = join(directory, template_name) file_with_ext = template_name if extension: # If there is a default extension, but no file extension, then add it file_name, file_ext = splitext(file_with_ext) if not file_ext: file_with_ext = extsep.join( (file_name, extension.replace(extsep, ''))) # Rebuild the relative path relative_path = join(directory, file_with_ext) return abspath(relative_path)
python
def get_abs_template_path(template_name, directory, extension): """ Given a template name, a directory, and an extension, return the absolute path to the template. """ # Get the relative path relative_path = join(directory, template_name) file_with_ext = template_name if extension: # If there is a default extension, but no file extension, then add it file_name, file_ext = splitext(file_with_ext) if not file_ext: file_with_ext = extsep.join( (file_name, extension.replace(extsep, ''))) # Rebuild the relative path relative_path = join(directory, file_with_ext) return abspath(relative_path)
[ "def", "get_abs_template_path", "(", "template_name", ",", "directory", ",", "extension", ")", ":", "# Get the relative path", "relative_path", "=", "join", "(", "directory", ",", "template_name", ")", "file_with_ext", "=", "template_name", "if", "extension", ":", "...
Given a template name, a directory, and an extension, return the absolute path to the template.
[ "Given", "a", "template", "name", "a", "directory", "and", "an", "extension", "return", "the", "absolute", "path", "to", "the", "template", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/loading.py#L26-L42
peterldowns/python-mustache
mustache/loading.py
load_file
def load_file(path, encoding, encoding_errors): """ Given an existing path, attempt to load it as a unicode string. """ abs_path = abspath(path) if exists(abs_path): return read_unicode(abs_path, encoding, encoding_errors) raise IOError('File %s does not exist' % (abs_path))
python
def load_file(path, encoding, encoding_errors): """ Given an existing path, attempt to load it as a unicode string. """ abs_path = abspath(path) if exists(abs_path): return read_unicode(abs_path, encoding, encoding_errors) raise IOError('File %s does not exist' % (abs_path))
[ "def", "load_file", "(", "path", ",", "encoding", ",", "encoding_errors", ")", ":", "abs_path", "=", "abspath", "(", "path", ")", "if", "exists", "(", "abs_path", ")", ":", "return", "read_unicode", "(", "abs_path", ",", "encoding", ",", "encoding_errors", ...
Given an existing path, attempt to load it as a unicode string.
[ "Given", "an", "existing", "path", "attempt", "to", "load", "it", "as", "a", "unicode", "string", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/loading.py#L44-L49
peterldowns/python-mustache
mustache/loading.py
load_template
def load_template(name, directory, extension, encoding, encoding_errors): """ Load a template and return its contents as a unicode string. """ abs_path = get_abs_template_path(name, directory, extension) return load_file(abs_path, encoding, encoding_errors)
python
def load_template(name, directory, extension, encoding, encoding_errors): """ Load a template and return its contents as a unicode string. """ abs_path = get_abs_template_path(name, directory, extension) return load_file(abs_path, encoding, encoding_errors)
[ "def", "load_template", "(", "name", ",", "directory", ",", "extension", ",", "encoding", ",", "encoding_errors", ")", ":", "abs_path", "=", "get_abs_template_path", "(", "name", ",", "directory", ",", "extension", ")", "return", "load_file", "(", "abs_path", ...
Load a template and return its contents as a unicode string.
[ "Load", "a", "template", "and", "return", "its", "contents", "as", "a", "unicode", "string", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/loading.py#L51-L54
phalt/beckett
beckett/clients.py
HTTPClient.prepare_http_request
def prepare_http_request(self, method_type, params, **kwargs): """ Prepares the HTTP REQUEST and returns it. Args: method_type: The HTTP method type params: Additional parameters for the HTTP request. kwargs: Any extra keyword arguements passed into a client method. returns: prepared_request: An HTTP request object. """ prepared_request = self.session.prepare_request( requests.Request(method=method_type, **params) ) return prepared_request
python
def prepare_http_request(self, method_type, params, **kwargs): """ Prepares the HTTP REQUEST and returns it. Args: method_type: The HTTP method type params: Additional parameters for the HTTP request. kwargs: Any extra keyword arguements passed into a client method. returns: prepared_request: An HTTP request object. """ prepared_request = self.session.prepare_request( requests.Request(method=method_type, **params) ) return prepared_request
[ "def", "prepare_http_request", "(", "self", ",", "method_type", ",", "params", ",", "*", "*", "kwargs", ")", ":", "prepared_request", "=", "self", ".", "session", ".", "prepare_request", "(", "requests", ".", "Request", "(", "method", "=", "method_type", ","...
Prepares the HTTP REQUEST and returns it. Args: method_type: The HTTP method type params: Additional parameters for the HTTP request. kwargs: Any extra keyword arguements passed into a client method. returns: prepared_request: An HTTP request object.
[ "Prepares", "the", "HTTP", "REQUEST", "and", "returns", "it", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/clients.py#L21-L36
phalt/beckett
beckett/clients.py
HTTPClient.call_api
def call_api(self, method_type, method_name, valid_status_codes, resource, data, uid, **kwargs): """ Make HTTP calls. Args: method_type: The HTTP method method_name: The name of the python method making the HTTP call valid_status_codes: A tuple of integer status codes deemed acceptable as response statuses resource: The resource class that will be generated data: The post data being sent. uid: The unique identifier of the resource. Returns: kwargs is a list of keyword arguments. Additional custom keyword arguments can be sent into this method and will be passed into subclass methods: - get_url - prepare_http_request - get_http_headers """ url = resource.get_resource_url( resource, base_url=self.Meta.base_url ) if method_type in SINGLE_RESOURCE_METHODS: if not uid and not kwargs: raise MissingUidException url = resource.get_url( url=url, uid=uid, **kwargs) params = { 'headers': self.get_http_headers( self.Meta.name, method_name, **kwargs), 'url': url } if method_type in ['POST', 'PUT', 'PATCH'] and isinstance(data, dict): params.update(json=data) prepared_request = self.prepare_http_request( method_type, params, **kwargs) response = self.session.send(prepared_request) return self._handle_response(response, valid_status_codes, resource)
python
def call_api(self, method_type, method_name, valid_status_codes, resource, data, uid, **kwargs): """ Make HTTP calls. Args: method_type: The HTTP method method_name: The name of the python method making the HTTP call valid_status_codes: A tuple of integer status codes deemed acceptable as response statuses resource: The resource class that will be generated data: The post data being sent. uid: The unique identifier of the resource. Returns: kwargs is a list of keyword arguments. Additional custom keyword arguments can be sent into this method and will be passed into subclass methods: - get_url - prepare_http_request - get_http_headers """ url = resource.get_resource_url( resource, base_url=self.Meta.base_url ) if method_type in SINGLE_RESOURCE_METHODS: if not uid and not kwargs: raise MissingUidException url = resource.get_url( url=url, uid=uid, **kwargs) params = { 'headers': self.get_http_headers( self.Meta.name, method_name, **kwargs), 'url': url } if method_type in ['POST', 'PUT', 'PATCH'] and isinstance(data, dict): params.update(json=data) prepared_request = self.prepare_http_request( method_type, params, **kwargs) response = self.session.send(prepared_request) return self._handle_response(response, valid_status_codes, resource)
[ "def", "call_api", "(", "self", ",", "method_type", ",", "method_name", ",", "valid_status_codes", ",", "resource", ",", "data", ",", "uid", ",", "*", "*", "kwargs", ")", ":", "url", "=", "resource", ".", "get_resource_url", "(", "resource", ",", "base_url...
Make HTTP calls. Args: method_type: The HTTP method method_name: The name of the python method making the HTTP call valid_status_codes: A tuple of integer status codes deemed acceptable as response statuses resource: The resource class that will be generated data: The post data being sent. uid: The unique identifier of the resource. Returns: kwargs is a list of keyword arguments. Additional custom keyword arguments can be sent into this method and will be passed into subclass methods: - get_url - prepare_http_request - get_http_headers
[ "Make", "HTTP", "calls", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/clients.py#L57-L99
phalt/beckett
beckett/clients.py
HTTPClient._handle_response
def _handle_response(self, response, valid_status_codes, resource): """ Handles Response objects Args: response: An HTTP reponse object valid_status_codes: A tuple list of valid status codes resource: The resource class to build from this response returns: resources: A list of Resource instances """ if response.status_code not in valid_status_codes: raise InvalidStatusCodeError( status_code=response.status_code, expected_status_codes=valid_status_codes ) if response.content: data = response.json() if isinstance(data, list): # A list of results is always rendered return [resource(**x) for x in data] else: # Try and find the paginated resources key = getattr(resource.Meta, 'pagination_key', None) if isinstance(data.get(key), list): # Only return the paginated responses return [resource(**x) for x in data.get(key)] else: # Attempt to render this whole response as a resource return [resource(**data)] return []
python
def _handle_response(self, response, valid_status_codes, resource): """ Handles Response objects Args: response: An HTTP reponse object valid_status_codes: A tuple list of valid status codes resource: The resource class to build from this response returns: resources: A list of Resource instances """ if response.status_code not in valid_status_codes: raise InvalidStatusCodeError( status_code=response.status_code, expected_status_codes=valid_status_codes ) if response.content: data = response.json() if isinstance(data, list): # A list of results is always rendered return [resource(**x) for x in data] else: # Try and find the paginated resources key = getattr(resource.Meta, 'pagination_key', None) if isinstance(data.get(key), list): # Only return the paginated responses return [resource(**x) for x in data.get(key)] else: # Attempt to render this whole response as a resource return [resource(**data)] return []
[ "def", "_handle_response", "(", "self", ",", "response", ",", "valid_status_codes", ",", "resource", ")", ":", "if", "response", ".", "status_code", "not", "in", "valid_status_codes", ":", "raise", "InvalidStatusCodeError", "(", "status_code", "=", "response", "."...
Handles Response objects Args: response: An HTTP reponse object valid_status_codes: A tuple list of valid status codes resource: The resource class to build from this response returns: resources: A list of Resource instances
[ "Handles", "Response", "objects" ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/clients.py#L101-L132
phalt/beckett
beckett/clients.py
HTTPHypermediaClient._call_api_single_related_resource
def _call_api_single_related_resource(self, resource, full_resource_url, method_name, **kwargs): """ For HypermediaResource - make an API call to a known URL """ url = full_resource_url params = { 'headers': self.get_http_headers( resource.Meta.name, method_name, **kwargs), 'url': url } prepared_request = self.prepare_http_request( 'GET', params, **kwargs) response = self.session.send(prepared_request) return self._handle_response( response, resource.Meta.valid_status_codes, resource)
python
def _call_api_single_related_resource(self, resource, full_resource_url, method_name, **kwargs): """ For HypermediaResource - make an API call to a known URL """ url = full_resource_url params = { 'headers': self.get_http_headers( resource.Meta.name, method_name, **kwargs), 'url': url } prepared_request = self.prepare_http_request( 'GET', params, **kwargs) response = self.session.send(prepared_request) return self._handle_response( response, resource.Meta.valid_status_codes, resource)
[ "def", "_call_api_single_related_resource", "(", "self", ",", "resource", ",", "full_resource_url", ",", "method_name", ",", "*", "*", "kwargs", ")", ":", "url", "=", "full_resource_url", "params", "=", "{", "'headers'", ":", "self", ".", "get_http_headers", "("...
For HypermediaResource - make an API call to a known URL
[ "For", "HypermediaResource", "-", "make", "an", "API", "call", "to", "a", "known", "URL" ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/clients.py#L142-L157
phalt/beckett
beckett/clients.py
HTTPHypermediaClient._call_api_many_related_resources
def _call_api_many_related_resources(self, resource, url_list, method_name, **kwargs): """ For HypermediaResource - make an API call to a list of known URLs """ responses = [] for url in url_list: params = { 'headers': self.get_http_headers( resource.Meta.name, method_name, **kwargs), 'url': url } prepared_request = self.prepare_http_request( 'GET', params, **kwargs) response = self.session.send(prepared_request) result = self._handle_response( response, resource.Meta.valid_status_codes, resource) if len(result) > 1: responses.append(result) else: responses.append(result[0]) return responses
python
def _call_api_many_related_resources(self, resource, url_list, method_name, **kwargs): """ For HypermediaResource - make an API call to a list of known URLs """ responses = [] for url in url_list: params = { 'headers': self.get_http_headers( resource.Meta.name, method_name, **kwargs), 'url': url } prepared_request = self.prepare_http_request( 'GET', params, **kwargs) response = self.session.send(prepared_request) result = self._handle_response( response, resource.Meta.valid_status_codes, resource) if len(result) > 1: responses.append(result) else: responses.append(result[0]) return responses
[ "def", "_call_api_many_related_resources", "(", "self", ",", "resource", ",", "url_list", ",", "method_name", ",", "*", "*", "kwargs", ")", ":", "responses", "=", "[", "]", "for", "url", "in", "url_list", ":", "params", "=", "{", "'headers'", ":", "self", ...
For HypermediaResource - make an API call to a list of known URLs
[ "For", "HypermediaResource", "-", "make", "an", "API", "call", "to", "a", "list", "of", "known", "URLs" ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/clients.py#L159-L180
phalt/beckett
beckett/clients.py
BaseClient.assign_methods
def assign_methods(self, resource_class): """ Given a resource_class and it's Meta.methods tuple, assign methods for communicating with that resource. Args: resource_class: A single resource class """ assert all([ x.upper() in VALID_METHODS for x in resource_class.Meta.methods]) for method in resource_class.Meta.methods: self._assign_method( resource_class, method.upper() )
python
def assign_methods(self, resource_class): """ Given a resource_class and it's Meta.methods tuple, assign methods for communicating with that resource. Args: resource_class: A single resource class """ assert all([ x.upper() in VALID_METHODS for x in resource_class.Meta.methods]) for method in resource_class.Meta.methods: self._assign_method( resource_class, method.upper() )
[ "def", "assign_methods", "(", "self", ",", "resource_class", ")", ":", "assert", "all", "(", "[", "x", ".", "upper", "(", ")", "in", "VALID_METHODS", "for", "x", "in", "resource_class", ".", "Meta", ".", "methods", "]", ")", "for", "method", "in", "res...
Given a resource_class and it's Meta.methods tuple, assign methods for communicating with that resource. Args: resource_class: A single resource class
[ "Given", "a", "resource_class", "and", "it", "s", "Meta", ".", "methods", "tuple", "assign", "methods", "for", "communicating", "with", "that", "resource", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/clients.py#L212-L227
phalt/beckett
beckett/clients.py
BaseClient._assign_method
def _assign_method(self, resource_class, method_type): """ Using reflection, assigns a new method to this class. Args: resource_class: A resource class method_type: The HTTP method type """ """ If we assigned the same method to each method, it's the same method in memory, so we need one for each acceptable HTTP method. """ method_name = resource_class.get_method_name( resource_class, method_type) valid_status_codes = getattr( resource_class.Meta, 'valid_status_codes', DEFAULT_VALID_STATUS_CODES ) # I know what you're going to say, and I'd love help making this nicer # reflection assigns the same memory addr to each method otherwise. def get(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) def put(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) def post(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) def patch(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) def delete(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) method_map = { 'GET': get, 'PUT': 
put, 'POST': post, 'PATCH': patch, 'DELETE': delete } setattr( self, method_name, types.MethodType(method_map[method_type], self) )
python
def _assign_method(self, resource_class, method_type): """ Using reflection, assigns a new method to this class. Args: resource_class: A resource class method_type: The HTTP method type """ """ If we assigned the same method to each method, it's the same method in memory, so we need one for each acceptable HTTP method. """ method_name = resource_class.get_method_name( resource_class, method_type) valid_status_codes = getattr( resource_class.Meta, 'valid_status_codes', DEFAULT_VALID_STATUS_CODES ) # I know what you're going to say, and I'd love help making this nicer # reflection assigns the same memory addr to each method otherwise. def get(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) def put(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) def post(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) def patch(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) def delete(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs): return self.call_api( method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs) method_map = { 'GET': get, 'PUT': 
put, 'POST': post, 'PATCH': patch, 'DELETE': delete } setattr( self, method_name, types.MethodType(method_map[method_type], self) )
[ "def", "_assign_method", "(", "self", ",", "resource_class", ",", "method_type", ")", ":", "\"\"\"\n If we assigned the same method to each method, it's the same\n method in memory, so we need one for each acceptable HTTP method.\n \"\"\"", "method_name", "=", "resourc...
Using reflection, assigns a new method to this class. Args: resource_class: A resource class method_type: The HTTP method type
[ "Using", "reflection", "assigns", "a", "new", "method", "to", "this", "class", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/clients.py#L229-L303
darkfeline/animanager
animanager/commands/purgecache.py
command
def command(state, args): """Purge all caches.""" state.cache_manager.teardown() state.cache_manager.setup() EpisodeTypes.forget(state.db) del state.file_picker
python
def command(state, args): """Purge all caches.""" state.cache_manager.teardown() state.cache_manager.setup() EpisodeTypes.forget(state.db) del state.file_picker
[ "def", "command", "(", "state", ",", "args", ")", ":", "state", ".", "cache_manager", ".", "teardown", "(", ")", "state", ".", "cache_manager", ".", "setup", "(", ")", "EpisodeTypes", ".", "forget", "(", "state", ".", "db", ")", "del", "state", ".", ...
Purge all caches.
[ "Purge", "all", "caches", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/purgecache.py#L21-L26
darkfeline/animanager
animanager/cmd/results/results.py
Results.append
def append(self, row): """Append a result row and check its length. >>> x = Results(['title', 'type']) >>> x.append(('Konosuba', 'TV')) >>> x Results(['title', 'type'], [('Konosuba', 'TV')]) >>> x.append(('Konosuba',)) Traceback (most recent call last): ... ValueError: Wrong result row length """ row = tuple(row) if len(row) != self.table_width: raise ValueError('Wrong result row length') self.results.append(row)
python
def append(self, row): """Append a result row and check its length. >>> x = Results(['title', 'type']) >>> x.append(('Konosuba', 'TV')) >>> x Results(['title', 'type'], [('Konosuba', 'TV')]) >>> x.append(('Konosuba',)) Traceback (most recent call last): ... ValueError: Wrong result row length """ row = tuple(row) if len(row) != self.table_width: raise ValueError('Wrong result row length') self.results.append(row)
[ "def", "append", "(", "self", ",", "row", ")", ":", "row", "=", "tuple", "(", "row", ")", "if", "len", "(", "row", ")", "!=", "self", ".", "table_width", ":", "raise", "ValueError", "(", "'Wrong result row length'", ")", "self", ".", "results", ".", ...
Append a result row and check its length. >>> x = Results(['title', 'type']) >>> x.append(('Konosuba', 'TV')) >>> x Results(['title', 'type'], [('Konosuba', 'TV')]) >>> x.append(('Konosuba',)) Traceback (most recent call last): ... ValueError: Wrong result row length
[ "Append", "a", "result", "row", "and", "check", "its", "length", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/cmd/results/results.py#L50-L67
darkfeline/animanager
animanager/cmd/results/results.py
Results.set
def set(self, results): """Set results. results is an iterable of tuples, where each tuple is a row of results. >>> x = Results(['title']) >>> x.set([('Konosuba',), ('Oreimo',)]) >>> x Results(['title'], [('Konosuba',), ('Oreimo',)]) """ self.results = list() for row in results: self.append(row)
python
def set(self, results): """Set results. results is an iterable of tuples, where each tuple is a row of results. >>> x = Results(['title']) >>> x.set([('Konosuba',), ('Oreimo',)]) >>> x Results(['title'], [('Konosuba',), ('Oreimo',)]) """ self.results = list() for row in results: self.append(row)
[ "def", "set", "(", "self", ",", "results", ")", ":", "self", ".", "results", "=", "list", "(", ")", "for", "row", "in", "results", ":", "self", ".", "append", "(", "row", ")" ]
Set results. results is an iterable of tuples, where each tuple is a row of results. >>> x = Results(['title']) >>> x.set([('Konosuba',), ('Oreimo',)]) >>> x Results(['title'], [('Konosuba',), ('Oreimo',)])
[ "Set", "results", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/cmd/results/results.py#L69-L82
darkfeline/animanager
animanager/cmd/results/results.py
Results.print
def print(self): """Print results table. >>> Results(['title'], [('Konosuba',), ('Oreimo',)]).print() # title --- -------- 1 Konosuba 2 Oreimo """ print(tabulate( ((i, *row) for i, row in enumerate(self.results, 1)), headers=self.headers, ))
python
def print(self): """Print results table. >>> Results(['title'], [('Konosuba',), ('Oreimo',)]).print() # title --- -------- 1 Konosuba 2 Oreimo """ print(tabulate( ((i, *row) for i, row in enumerate(self.results, 1)), headers=self.headers, ))
[ "def", "print", "(", "self", ")", ":", "print", "(", "tabulate", "(", "(", "(", "i", ",", "*", "row", ")", "for", "i", ",", "row", "in", "enumerate", "(", "self", ".", "results", ",", "1", ")", ")", ",", "headers", "=", "self", ".", "headers", ...
Print results table. >>> Results(['title'], [('Konosuba',), ('Oreimo',)]).print() # title --- -------- 1 Konosuba 2 Oreimo
[ "Print", "results", "table", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/cmd/results/results.py#L100-L113
darkfeline/animanager
animanager/commands/unregister.py
command
def command(state, args): """Unregister watching regexp for an anime.""" args = parser.parse_args(args[1:]) if args.complete: query.files.delete_regexp_complete(state.db) else: if args.aid is None: parser.print_help() else: aid = state.results.parse_aid(args.aid, default_key='db') query.files.delete_regexp(state.db, aid)
python
def command(state, args): """Unregister watching regexp for an anime.""" args = parser.parse_args(args[1:]) if args.complete: query.files.delete_regexp_complete(state.db) else: if args.aid is None: parser.print_help() else: aid = state.results.parse_aid(args.aid, default_key='db') query.files.delete_regexp(state.db, aid)
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "if", "args", ".", "complete", ":", "query", ".", "files", ".", "delete_regexp_complete", "(", "state", ".", "db", ...
Unregister watching regexp for an anime.
[ "Unregister", "watching", "regexp", "for", "an", "anime", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/unregister.py#L22-L32
ucbvislab/radiotool
radiotool/algorithms/novelty.py
novelty
def novelty(song, k=64, wlen_ms=100, start=0, duration=None, nchangepoints=5, feature="rms"): """Return points of high "novelty" in a song (e.g., significant musical transitions) :param song: Song to analyze :type song: :py:class:`radiotool.composer.Song` :param k: Width of comparison kernel (larger kernel finds coarser differences in music) :type k: int :param wlen_ms: Analysis window length in milliseconds :type wlen_ms: int :param start: Where to start analysis within the song (in seconds) :type start: float :param duration: How long of a chunk of the song to analyze (None analyzes the entire song after start) :type duration: float :param nchangepoints: How many novel change points to return :type nchangepoints: int :param feature: Music feature to use for novelty analysis :type feature: "rms" or "mfcc" (will support "chroma" eventually) :returns: List of change points (in seconds) :rtype: list of floats """ if feature != "rms" and feature != "mfcc": raise ValueError, "novelty currently only supports 'rms' and 'mfcc' features" if feature == "rms": frames = song.all_as_mono() wlen_samples = int(wlen_ms * song.samplerate / 1000) if duration is None: frames = frames[start * song.samplerate:] else: frames = frames[start * song.samplerate:(start + duration) * song.samplerate] # Compute energies hamming = np.hamming(wlen_samples) nwindows = int(2 * song.duration / wlen_samples - 1) energies = np.empty(nwindows) for i in range(nwindows): energies[i] = RMS_energy( hamming * frames[i * wlen_samples / 2: i * wlen_samples / 2 + wlen_samples] ) energies_list = [[x] for x in energies] elif feature == "mfcc": analysis = song.analysis energies_list = np.array(analysis["timbres"]) # Compute similarities S_matrix = 1 - scipy.spatial.distance.squareform( scipy.spatial.distance.pdist(energies_list, 'euclidean')) # smooth the C matrix with a gaussian taper C_matrix = np.kron(np.eye(2), np.ones((k,k))) -\ np.kron([[0, 1], [1, 0]], np.ones((k,k))) g = scipy.signal.gaussian(2*k, k) 
C_matrix = np.multiply(C_matrix, np.multiply.outer(g.T, g)) # Created checkerboard kernel N_vec = np.zeros(np.shape(S_matrix)[0]) for i in xrange(k, len(N_vec) - k): S_part = S_matrix[i - k:i + k, i - k:i + k] N_vec[i] = np.sum(np.multiply(S_part, C_matrix)) # Computed checkerboard response peaks = naive_peaks(N_vec, k=k / 2 + 1) out_peaks = [] if feature == "rms": # ensure that the points we return are more exciting # after the change point than before the change point for p in peaks: frame = p[0] if frame > k: left_frames = frames[int((frame - k) * wlen_samples / 2): int(frame * wlen_samples / 2)] right_frames = frames[int(frame * wlen_samples / 2): int((frame + k) * wlen_samples / 2)] if RMS_energy(left_frames) <\ RMS_energy(right_frames): out_peaks.append(p) out_peaks = [(x[0] * wlen_ms / 2000.0, x[1]) for x in out_peaks] for i, p in enumerate(out_peaks): if i == nchangepoints: break return [x[0] for x in out_peaks[:nchangepoints]] elif feature == "mfcc": beats = analysis["beats"] return [beats[int(b[0])] for b in peaks[:nchangepoints]]
python
def novelty(song, k=64, wlen_ms=100, start=0, duration=None, nchangepoints=5, feature="rms"): """Return points of high "novelty" in a song (e.g., significant musical transitions) :param song: Song to analyze :type song: :py:class:`radiotool.composer.Song` :param k: Width of comparison kernel (larger kernel finds coarser differences in music) :type k: int :param wlen_ms: Analysis window length in milliseconds :type wlen_ms: int :param start: Where to start analysis within the song (in seconds) :type start: float :param duration: How long of a chunk of the song to analyze (None analyzes the entire song after start) :type duration: float :param nchangepoints: How many novel change points to return :type nchangepoints: int :param feature: Music feature to use for novelty analysis :type feature: "rms" or "mfcc" (will support "chroma" eventually) :returns: List of change points (in seconds) :rtype: list of floats """ if feature != "rms" and feature != "mfcc": raise ValueError, "novelty currently only supports 'rms' and 'mfcc' features" if feature == "rms": frames = song.all_as_mono() wlen_samples = int(wlen_ms * song.samplerate / 1000) if duration is None: frames = frames[start * song.samplerate:] else: frames = frames[start * song.samplerate:(start + duration) * song.samplerate] # Compute energies hamming = np.hamming(wlen_samples) nwindows = int(2 * song.duration / wlen_samples - 1) energies = np.empty(nwindows) for i in range(nwindows): energies[i] = RMS_energy( hamming * frames[i * wlen_samples / 2: i * wlen_samples / 2 + wlen_samples] ) energies_list = [[x] for x in energies] elif feature == "mfcc": analysis = song.analysis energies_list = np.array(analysis["timbres"]) # Compute similarities S_matrix = 1 - scipy.spatial.distance.squareform( scipy.spatial.distance.pdist(energies_list, 'euclidean')) # smooth the C matrix with a gaussian taper C_matrix = np.kron(np.eye(2), np.ones((k,k))) -\ np.kron([[0, 1], [1, 0]], np.ones((k,k))) g = scipy.signal.gaussian(2*k, k) 
C_matrix = np.multiply(C_matrix, np.multiply.outer(g.T, g)) # Created checkerboard kernel N_vec = np.zeros(np.shape(S_matrix)[0]) for i in xrange(k, len(N_vec) - k): S_part = S_matrix[i - k:i + k, i - k:i + k] N_vec[i] = np.sum(np.multiply(S_part, C_matrix)) # Computed checkerboard response peaks = naive_peaks(N_vec, k=k / 2 + 1) out_peaks = [] if feature == "rms": # ensure that the points we return are more exciting # after the change point than before the change point for p in peaks: frame = p[0] if frame > k: left_frames = frames[int((frame - k) * wlen_samples / 2): int(frame * wlen_samples / 2)] right_frames = frames[int(frame * wlen_samples / 2): int((frame + k) * wlen_samples / 2)] if RMS_energy(left_frames) <\ RMS_energy(right_frames): out_peaks.append(p) out_peaks = [(x[0] * wlen_ms / 2000.0, x[1]) for x in out_peaks] for i, p in enumerate(out_peaks): if i == nchangepoints: break return [x[0] for x in out_peaks[:nchangepoints]] elif feature == "mfcc": beats = analysis["beats"] return [beats[int(b[0])] for b in peaks[:nchangepoints]]
[ "def", "novelty", "(", "song", ",", "k", "=", "64", ",", "wlen_ms", "=", "100", ",", "start", "=", "0", ",", "duration", "=", "None", ",", "nchangepoints", "=", "5", ",", "feature", "=", "\"rms\"", ")", ":", "if", "feature", "!=", "\"rms\"", "and",...
Return points of high "novelty" in a song (e.g., significant musical transitions) :param song: Song to analyze :type song: :py:class:`radiotool.composer.Song` :param k: Width of comparison kernel (larger kernel finds coarser differences in music) :type k: int :param wlen_ms: Analysis window length in milliseconds :type wlen_ms: int :param start: Where to start analysis within the song (in seconds) :type start: float :param duration: How long of a chunk of the song to analyze (None analyzes the entire song after start) :type duration: float :param nchangepoints: How many novel change points to return :type nchangepoints: int :param feature: Music feature to use for novelty analysis :type feature: "rms" or "mfcc" (will support "chroma" eventually) :returns: List of change points (in seconds) :rtype: list of floats
[ "Return", "points", "of", "high", "novelty", "in", "a", "song", "(", "e", ".", "g", ".", "significant", "musical", "transitions", ")" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/algorithms/novelty.py#L10-L107
ucbvislab/radiotool
radiotool/algorithms/novelty.py
smooth_hanning
def smooth_hanning(x, size=11): """smooth a 1D array using a hanning window with requested size.""" if x.ndim != 1: raise ValueError, "smooth_hanning only accepts 1-D arrays." if x.size < size: raise ValueError, "Input vector needs to be bigger than window size." if size < 3: return x s = np.r_[x[size - 1:0:-1], x, x[-1:-size:-1]] w = np.hanning(size) y = np.convolve(w / w.sum(), s, mode='valid') return y
python
def smooth_hanning(x, size=11): """smooth a 1D array using a hanning window with requested size.""" if x.ndim != 1: raise ValueError, "smooth_hanning only accepts 1-D arrays." if x.size < size: raise ValueError, "Input vector needs to be bigger than window size." if size < 3: return x s = np.r_[x[size - 1:0:-1], x, x[-1:-size:-1]] w = np.hanning(size) y = np.convolve(w / w.sum(), s, mode='valid') return y
[ "def", "smooth_hanning", "(", "x", ",", "size", "=", "11", ")", ":", "if", "x", ".", "ndim", "!=", "1", ":", "raise", "ValueError", ",", "\"smooth_hanning only accepts 1-D arrays.\"", "if", "x", ".", "size", "<", "size", ":", "raise", "ValueError", ",", ...
smooth a 1D array using a hanning window with requested size.
[ "smooth", "a", "1D", "array", "using", "a", "hanning", "window", "with", "requested", "size", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/algorithms/novelty.py#L110-L123
ucbvislab/radiotool
radiotool/algorithms/novelty.py
naive_peaks
def naive_peaks(vec, k=33): """A naive method for finding peaks of a signal. 1. Smooth vector 2. Find peaks (local maxima) 3. Find local max from original signal, pre-smoothing 4. Return (sorted, descending) peaks """ a = smooth_hanning(vec, k) k2 = (k - 1) / 2 peaks = np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] > a[1:], True] p = np.array(np.where(peaks)[0]) maxidx = np.zeros(np.shape(p)) maxvals = np.zeros(np.shape(p)) for i, pk in enumerate(p): maxidx[i] = np.argmax(vec[pk - k2:pk + k2]) + pk - k2 maxvals[i] = np.max(vec[pk - k2:pk + k2]) out = np.array([maxidx, maxvals]).T return out[(-out[:, 1]).argsort()]
python
def naive_peaks(vec, k=33): """A naive method for finding peaks of a signal. 1. Smooth vector 2. Find peaks (local maxima) 3. Find local max from original signal, pre-smoothing 4. Return (sorted, descending) peaks """ a = smooth_hanning(vec, k) k2 = (k - 1) / 2 peaks = np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] > a[1:], True] p = np.array(np.where(peaks)[0]) maxidx = np.zeros(np.shape(p)) maxvals = np.zeros(np.shape(p)) for i, pk in enumerate(p): maxidx[i] = np.argmax(vec[pk - k2:pk + k2]) + pk - k2 maxvals[i] = np.max(vec[pk - k2:pk + k2]) out = np.array([maxidx, maxvals]).T return out[(-out[:, 1]).argsort()]
[ "def", "naive_peaks", "(", "vec", ",", "k", "=", "33", ")", ":", "a", "=", "smooth_hanning", "(", "vec", ",", "k", ")", "k2", "=", "(", "k", "-", "1", ")", "/", "2", "peaks", "=", "np", ".", "r_", "[", "True", ",", "a", "[", "1", ":", "]"...
A naive method for finding peaks of a signal. 1. Smooth vector 2. Find peaks (local maxima) 3. Find local max from original signal, pre-smoothing 4. Return (sorted, descending) peaks
[ "A", "naive", "method", "for", "finding", "peaks", "of", "a", "signal", ".", "1", ".", "Smooth", "vector", "2", ".", "Find", "peaks", "(", "local", "maxima", ")", "3", ".", "Find", "local", "max", "from", "original", "signal", "pre", "-", "smoothing", ...
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/algorithms/novelty.py#L126-L147
rdireen/spherepy
spherepy/spherepy.py
zeros_coefs
def zeros_coefs(nmax, mmax, coef_type=scalar): """Returns a ScalarCoefs object or a VectorCoeffs object where each of the coefficients is set to 0. The structure is such that *nmax* is th largest *n* can be in c[n, m], and *mmax* is the largest *m* can be for any *n*. (See *ScalarCoefs* and *VectorCoefs* for details.) Examples:: >>> c = spherepy.zeros_coefs(5, 3, coef_type = spherepy.scalar) >>> c = spherepy.zeros_coefs(5, 3) # same as above >>> vc = spherepy.zeros_coefs(5, 3, coef_type = spherepy.vector) Args: nmax (int): Largest *n* value in the set of modes. mmax (int): Largest abs(*m*) value in the set of modes. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. If you would like to return a set of vector spherical hamonic coefficients, the preferred way to do so is vc = spherepy.zeros_coefs( 10, 12, coef_type = spherepy.vector). Returns: coefs: Returns a ScalarCoefs object if coef_type is either blank or set to 0. Returns a VectorCoefs object if coef_type = 1. Raises: TypeError: If coef_type is anything but 0 or 1. """ if(mmax > nmax): raise ValueError(err_msg['nmax_g_mmax']) if(coef_type == scalar): L = (nmax + 1) + mmax * (2 * nmax - mmax + 1) vec = np.zeros(L, dtype=np.complex128) return ScalarCoefs(vec, nmax, mmax) elif(coef_type == vector): L = (nmax + 1) + mmax * (2 * nmax - mmax + 1) vec1 = np.zeros(L, dtype=np.complex128) vec2 = np.zeros(L, dtype=np.complex128) return VectorCoefs(vec1, vec2, nmax, mmax) else: raise TypeError(err_msg['ukn_coef_t'])
python
def zeros_coefs(nmax, mmax, coef_type=scalar): """Returns a ScalarCoefs object or a VectorCoeffs object where each of the coefficients is set to 0. The structure is such that *nmax* is th largest *n* can be in c[n, m], and *mmax* is the largest *m* can be for any *n*. (See *ScalarCoefs* and *VectorCoefs* for details.) Examples:: >>> c = spherepy.zeros_coefs(5, 3, coef_type = spherepy.scalar) >>> c = spherepy.zeros_coefs(5, 3) # same as above >>> vc = spherepy.zeros_coefs(5, 3, coef_type = spherepy.vector) Args: nmax (int): Largest *n* value in the set of modes. mmax (int): Largest abs(*m*) value in the set of modes. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. If you would like to return a set of vector spherical hamonic coefficients, the preferred way to do so is vc = spherepy.zeros_coefs( 10, 12, coef_type = spherepy.vector). Returns: coefs: Returns a ScalarCoefs object if coef_type is either blank or set to 0. Returns a VectorCoefs object if coef_type = 1. Raises: TypeError: If coef_type is anything but 0 or 1. """ if(mmax > nmax): raise ValueError(err_msg['nmax_g_mmax']) if(coef_type == scalar): L = (nmax + 1) + mmax * (2 * nmax - mmax + 1) vec = np.zeros(L, dtype=np.complex128) return ScalarCoefs(vec, nmax, mmax) elif(coef_type == vector): L = (nmax + 1) + mmax * (2 * nmax - mmax + 1) vec1 = np.zeros(L, dtype=np.complex128) vec2 = np.zeros(L, dtype=np.complex128) return VectorCoefs(vec1, vec2, nmax, mmax) else: raise TypeError(err_msg['ukn_coef_t'])
[ "def", "zeros_coefs", "(", "nmax", ",", "mmax", ",", "coef_type", "=", "scalar", ")", ":", "if", "(", "mmax", ">", "nmax", ")", ":", "raise", "ValueError", "(", "err_msg", "[", "'nmax_g_mmax'", "]", ")", "if", "(", "coef_type", "==", "scalar", ")", "...
Returns a ScalarCoefs object or a VectorCoeffs object where each of the coefficients is set to 0. The structure is such that *nmax* is th largest *n* can be in c[n, m], and *mmax* is the largest *m* can be for any *n*. (See *ScalarCoefs* and *VectorCoefs* for details.) Examples:: >>> c = spherepy.zeros_coefs(5, 3, coef_type = spherepy.scalar) >>> c = spherepy.zeros_coefs(5, 3) # same as above >>> vc = spherepy.zeros_coefs(5, 3, coef_type = spherepy.vector) Args: nmax (int): Largest *n* value in the set of modes. mmax (int): Largest abs(*m*) value in the set of modes. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. If you would like to return a set of vector spherical hamonic coefficients, the preferred way to do so is vc = spherepy.zeros_coefs( 10, 12, coef_type = spherepy.vector). Returns: coefs: Returns a ScalarCoefs object if coef_type is either blank or set to 0. Returns a VectorCoefs object if coef_type = 1. Raises: TypeError: If coef_type is anything but 0 or 1.
[ "Returns", "a", "ScalarCoefs", "object", "or", "a", "VectorCoeffs", "object", "where", "each", "of", "the", "coefficients", "is", "set", "to", "0", ".", "The", "structure", "is", "such", "that", "*", "nmax", "*", "is", "th", "largest", "*", "n", "*", "...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/spherepy.py#L1376-L1421
rdireen/spherepy
spherepy/spherepy.py
random_coefs
def random_coefs(nmax, mmax, mu=0.0, sigma=1.0, coef_type=scalar): """Returns a ScalarCoefs object or a VectorCoeffs object where each of the coefficients is a normal random variable with mean 0 and standardard deviation 1.0. The structure is such that *nmax* is th largest *n* can be in c[n, m], and *mmax* is the largest *m* can be for any *n*. (See *ScalarCoefs* and *VectorCoefs* for details.) Examples:: >>> c = spherepy.random_coefs(5, 3, coef_type = spherepy.scalar) >>> c = spherepy.random_coefs(5, 3) # same as above >>> vc = spherepy.random_coefs(5, 3, coef_type = spherepy.vector) Args: nmax (int): Largest *n* value in the set of modes. mmax (int): Largest abs(*m*) value in the set of modes. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. If you would like to return a set of vector spherical hamonic coefficients, the preferred way to do so is vc = spherepy.zeros_coefs( 10, 12, coef_type = spherepy.vector). Returns: coefs: Returns a ScalarCoefs object if coef_type is either blank or set to 0. Returns a VectorCoefs object if coef_type = 1. Raises: TypeError: If coef_type is anything but 0 or 1. """ if(mmax > nmax): raise ValueError(err_msg['nmax_g_mmax']) if(coef_type == scalar): L = (nmax + 1) + mmax * (2 * nmax - mmax + 1) vec = np.random.normal(mu, sigma, L) + \ 1j * np.random.normal(mu, sigma, L) return ScalarCoefs(vec, nmax, mmax) elif(coef_type == vector): L = (nmax + 1) + mmax * (2 * nmax - mmax + 1) vec1 = np.random.normal(mu, sigma, L) + \ 1j * np.random.normal(mu, sigma, L) vec1[0] = 0 vec2 = np.random.normal(mu, sigma, L) + \ 1j * np.random.normal(mu, sigma, L) vec2[0] = 0 return VectorCoefs(vec1, vec2, nmax, mmax) else: raise TypeError(err_msg['ukn_coef_t'])
python
def random_coefs(nmax, mmax, mu=0.0, sigma=1.0, coef_type=scalar): """Returns a ScalarCoefs object or a VectorCoeffs object where each of the coefficients is a normal random variable with mean 0 and standardard deviation 1.0. The structure is such that *nmax* is th largest *n* can be in c[n, m], and *mmax* is the largest *m* can be for any *n*. (See *ScalarCoefs* and *VectorCoefs* for details.) Examples:: >>> c = spherepy.random_coefs(5, 3, coef_type = spherepy.scalar) >>> c = spherepy.random_coefs(5, 3) # same as above >>> vc = spherepy.random_coefs(5, 3, coef_type = spherepy.vector) Args: nmax (int): Largest *n* value in the set of modes. mmax (int): Largest abs(*m*) value in the set of modes. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. If you would like to return a set of vector spherical hamonic coefficients, the preferred way to do so is vc = spherepy.zeros_coefs( 10, 12, coef_type = spherepy.vector). Returns: coefs: Returns a ScalarCoefs object if coef_type is either blank or set to 0. Returns a VectorCoefs object if coef_type = 1. Raises: TypeError: If coef_type is anything but 0 or 1. """ if(mmax > nmax): raise ValueError(err_msg['nmax_g_mmax']) if(coef_type == scalar): L = (nmax + 1) + mmax * (2 * nmax - mmax + 1) vec = np.random.normal(mu, sigma, L) + \ 1j * np.random.normal(mu, sigma, L) return ScalarCoefs(vec, nmax, mmax) elif(coef_type == vector): L = (nmax + 1) + mmax * (2 * nmax - mmax + 1) vec1 = np.random.normal(mu, sigma, L) + \ 1j * np.random.normal(mu, sigma, L) vec1[0] = 0 vec2 = np.random.normal(mu, sigma, L) + \ 1j * np.random.normal(mu, sigma, L) vec2[0] = 0 return VectorCoefs(vec1, vec2, nmax, mmax) else: raise TypeError(err_msg['ukn_coef_t'])
[ "def", "random_coefs", "(", "nmax", ",", "mmax", ",", "mu", "=", "0.0", ",", "sigma", "=", "1.0", ",", "coef_type", "=", "scalar", ")", ":", "if", "(", "mmax", ">", "nmax", ")", ":", "raise", "ValueError", "(", "err_msg", "[", "'nmax_g_mmax'", "]", ...
Returns a ScalarCoefs object or a VectorCoeffs object where each of the coefficients is a normal random variable with mean 0 and standardard deviation 1.0. The structure is such that *nmax* is th largest *n* can be in c[n, m], and *mmax* is the largest *m* can be for any *n*. (See *ScalarCoefs* and *VectorCoefs* for details.) Examples:: >>> c = spherepy.random_coefs(5, 3, coef_type = spherepy.scalar) >>> c = spherepy.random_coefs(5, 3) # same as above >>> vc = spherepy.random_coefs(5, 3, coef_type = spherepy.vector) Args: nmax (int): Largest *n* value in the set of modes. mmax (int): Largest abs(*m*) value in the set of modes. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. If you would like to return a set of vector spherical hamonic coefficients, the preferred way to do so is vc = spherepy.zeros_coefs( 10, 12, coef_type = spherepy.vector). Returns: coefs: Returns a ScalarCoefs object if coef_type is either blank or set to 0. Returns a VectorCoefs object if coef_type = 1. Raises: TypeError: If coef_type is anything but 0 or 1.
[ "Returns", "a", "ScalarCoefs", "object", "or", "a", "VectorCoeffs", "object", "where", "each", "of", "the", "coefficients", "is", "a", "normal", "random", "variable", "with", "mean", "0", "and", "standardard", "deviation", "1", ".", "0", ".", "The", "structu...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/spherepy.py#L1472-L1523
rdireen/spherepy
spherepy/spherepy.py
zeros_patt_uniform
def zeros_patt_uniform(nrows, ncols, patt_type=scalar): """Returns a ScalarPatternUniform object or a VectorPatternUniform object where each of the elements is set to 0. *nrows* is the number of rows in the pattern, which corresponds to the theta axis. *ncols* must be even and is the number of columns in the pattern and corresponds to the phi axis. (See *ScalarPatternUniform* and *VectorPatternUniform* for details.) Examples:: >>> f = spherepy.zeros_patt_uniform(6, 8, coef_type = spherepy.scalar) >>> f = spherepy.zeros_patt_uniform(6, 8) # same as above >>> F = spherepy.zeros_patt_uniform(6, 8, coef_type = spherepy.vector) Args: nrows (int): Number of rows corresponding to the theta axis. ncols (int): Number of columns corresponding to the phi axis. To get the speed and accuracy I need, this value **must** be even. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. Returns: coefs: Returns a ScalarPatternUniform object if coef_type is either blank or set to 0. Returns a VectorPatternUniform object if coef_type = 1. Raises: ValueError: If ncols is not even. TypeError: If coef_type is anything but 0 or 1. """ if np.mod(ncols, 2) == 1: raise ValueError(err_msg['ncols_even']) if(patt_type == scalar): cdata = np.zeros((2 * nrows - 2, ncols), dtype=np.complex128) return ScalarPatternUniform(cdata, doublesphere=True) elif(patt_type == vector): tcdata = np.zeros((2 * nrows - 2, ncols), dtype=np.complex128) pcdata = np.zeros((2 * nrows - 2, ncols), dtype=np.complex128) return TransversePatternUniform(tcdata, pcdata, doublesphere=True) else: raise TypeError(err_msg['ukn_patt_t'])
python
def zeros_patt_uniform(nrows, ncols, patt_type=scalar): """Returns a ScalarPatternUniform object or a VectorPatternUniform object where each of the elements is set to 0. *nrows* is the number of rows in the pattern, which corresponds to the theta axis. *ncols* must be even and is the number of columns in the pattern and corresponds to the phi axis. (See *ScalarPatternUniform* and *VectorPatternUniform* for details.) Examples:: >>> f = spherepy.zeros_patt_uniform(6, 8, coef_type = spherepy.scalar) >>> f = spherepy.zeros_patt_uniform(6, 8) # same as above >>> F = spherepy.zeros_patt_uniform(6, 8, coef_type = spherepy.vector) Args: nrows (int): Number of rows corresponding to the theta axis. ncols (int): Number of columns corresponding to the phi axis. To get the speed and accuracy I need, this value **must** be even. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. Returns: coefs: Returns a ScalarPatternUniform object if coef_type is either blank or set to 0. Returns a VectorPatternUniform object if coef_type = 1. Raises: ValueError: If ncols is not even. TypeError: If coef_type is anything but 0 or 1. """ if np.mod(ncols, 2) == 1: raise ValueError(err_msg['ncols_even']) if(patt_type == scalar): cdata = np.zeros((2 * nrows - 2, ncols), dtype=np.complex128) return ScalarPatternUniform(cdata, doublesphere=True) elif(patt_type == vector): tcdata = np.zeros((2 * nrows - 2, ncols), dtype=np.complex128) pcdata = np.zeros((2 * nrows - 2, ncols), dtype=np.complex128) return TransversePatternUniform(tcdata, pcdata, doublesphere=True) else: raise TypeError(err_msg['ukn_patt_t'])
[ "def", "zeros_patt_uniform", "(", "nrows", ",", "ncols", ",", "patt_type", "=", "scalar", ")", ":", "if", "np", ".", "mod", "(", "ncols", ",", "2", ")", "==", "1", ":", "raise", "ValueError", "(", "err_msg", "[", "'ncols_even'", "]", ")", "if", "(", ...
Returns a ScalarPatternUniform object or a VectorPatternUniform object where each of the elements is set to 0. *nrows* is the number of rows in the pattern, which corresponds to the theta axis. *ncols* must be even and is the number of columns in the pattern and corresponds to the phi axis. (See *ScalarPatternUniform* and *VectorPatternUniform* for details.) Examples:: >>> f = spherepy.zeros_patt_uniform(6, 8, coef_type = spherepy.scalar) >>> f = spherepy.zeros_patt_uniform(6, 8) # same as above >>> F = spherepy.zeros_patt_uniform(6, 8, coef_type = spherepy.vector) Args: nrows (int): Number of rows corresponding to the theta axis. ncols (int): Number of columns corresponding to the phi axis. To get the speed and accuracy I need, this value **must** be even. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. Returns: coefs: Returns a ScalarPatternUniform object if coef_type is either blank or set to 0. Returns a VectorPatternUniform object if coef_type = 1. Raises: ValueError: If ncols is not even. TypeError: If coef_type is anything but 0 or 1.
[ "Returns", "a", "ScalarPatternUniform", "object", "or", "a", "VectorPatternUniform", "object", "where", "each", "of", "the", "elements", "is", "set", "to", "0", ".", "*", "nrows", "*", "is", "the", "number", "of", "rows", "in", "the", "pattern", "which", "...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/spherepy.py#L1525-L1573
rdireen/spherepy
spherepy/spherepy.py
random_patt_uniform
def random_patt_uniform(nrows, ncols, patt_type=scalar):
    """Return a pattern whose samples are complex standard normal draws.

    Each sample has independent N(0, 1) real and imaginary parts. Builds a
    ScalarPatternUniform (patt_type == scalar) or a
    TransversePatternUniform (patt_type == vector). *nrows* corresponds to
    the theta axis; *ncols* must be even and corresponds to the phi axis.
    (See *ScalarPatternUniform* and *TransversePatternUniform* for
    details.)

    Examples::

        >>> f = spherepy.random_patt_uniform(6, 8, patt_type = spherepy.scalar)
        >>> f = spherepy.random_patt_uniform(6, 8)  # same as above
        >>> F = spherepy.random_patt_uniform(6, 8, patt_type = spherepy.vector)

    Args:
        nrows (int): Number of rows corresponding to the theta axis.
        ncols (int): Number of columns corresponding to the phi axis. To
            get the required speed and accuracy, this value **must** be
            even.
        patt_type (int, optional): spherepy.scalar (0, the default) or
            spherepy.vector (1).

    Returns:
        ScalarPatternUniform when patt_type == scalar;
        TransversePatternUniform when patt_type == vector.

    Raises:
        ValueError: If ncols is not even.
        TypeError: If patt_type is neither scalar nor vector.
    """
    if ncols % 2 == 1:
        raise ValueError(err_msg['ncols_even'])

    def _complex_normal():
        # Draw real parts first, then imaginary parts, preserving the
        # original consumption order of the global NumPy RNG stream.
        re = np.random.normal(0.0, 1.0, nrows * ncols)
        im = np.random.normal(0.0, 1.0, nrows * ncols)
        return (re + 1j * im).reshape((nrows, ncols))

    if patt_type == scalar:
        return ScalarPatternUniform(_complex_normal(), doublesphere=False)
    elif patt_type == vector:
        return TransversePatternUniform(_complex_normal(),
                                        _complex_normal(),
                                        doublesphere=False)
    else:
        raise TypeError(err_msg['ukn_patt_t'])
python
def random_patt_uniform(nrows, ncols, patt_type=scalar): """Returns a ScalarPatternUniform object or a VectorPatternUniform object where each of the elements is set to a normal random variable with zero mean and unit standard deviation. *nrows* is the number of rows in the pattern, which corresponds to the theta axis. *ncols* must be even and is the number of columns in the pattern and corresponds to the phi axis. (See *ScalarPatternUniform* and *VectorPatternUniform* for details.) Examples:: >>> f = spherepy.random_patt_uniform(6, 8, coef_type = spherepy.scalar) >>> f = spherepy.random_patt_uniform(6, 8) # same as above >>> F = spherepy.random_patt_uniform(6, 8, coef_type = spherepy.vector) Args: nrows (int): Number of rows corresponding to the theta axis. ncols (int): Number of columns corresponding to the phi axis. To get the speed and accuracy I need, this value **must** be even. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. Returns: coefs: Returns a ScalarPatternUniform object if coef_type is either blank or set to 0. Returns a VectorPatternUniform object if coef_type = 1. Raises: ValueError: If ncols is not even. TypeError: If coef_type is anything but 0 or 1. """ if np.mod(ncols, 2) == 1: raise ValueError(err_msg['ncols_even']) if(patt_type == scalar): vec = np.random.normal(0.0, 1.0, nrows * ncols) + \ 1j * np.random.normal(0.0, 1.0, nrows * ncols) return ScalarPatternUniform(vec.reshape((nrows, ncols)), doublesphere=False) elif(patt_type == vector): vec1 = np.random.normal(0.0, 1.0, nrows * ncols) + \ 1j * np.random.normal(0.0, 1.0, nrows * ncols) vec2 = np.random.normal(0.0, 1.0, nrows * ncols) + \ 1j * np.random.normal(0.0, 1.0, nrows * ncols) return TransversePatternUniform(vec1.reshape((nrows, ncols)), vec2.reshape((nrows, ncols)), doublesphere=False) else: raise TypeError(err_msg['ukn_patt_t'])
[ "def", "random_patt_uniform", "(", "nrows", ",", "ncols", ",", "patt_type", "=", "scalar", ")", ":", "if", "np", ".", "mod", "(", "ncols", ",", "2", ")", "==", "1", ":", "raise", "ValueError", "(", "err_msg", "[", "'ncols_even'", "]", ")", "if", "(",...
Returns a ScalarPatternUniform object or a VectorPatternUniform object where each of the elements is set to a normal random variable with zero mean and unit standard deviation. *nrows* is the number of rows in the pattern, which corresponds to the theta axis. *ncols* must be even and is the number of columns in the pattern and corresponds to the phi axis. (See *ScalarPatternUniform* and *VectorPatternUniform* for details.) Examples:: >>> f = spherepy.random_patt_uniform(6, 8, coef_type = spherepy.scalar) >>> f = spherepy.random_patt_uniform(6, 8) # same as above >>> F = spherepy.random_patt_uniform(6, 8, coef_type = spherepy.vector) Args: nrows (int): Number of rows corresponding to the theta axis. ncols (int): Number of columns corresponding to the phi axis. To get the speed and accuracy I need, this value **must** be even. coef_type (int, optional): Set to 0 for scalar, and 1 for vector. The default option is scalar. Returns: coefs: Returns a ScalarPatternUniform object if coef_type is either blank or set to 0. Returns a VectorPatternUniform object if coef_type = 1. Raises: ValueError: If ncols is not even. TypeError: If coef_type is anything but 0 or 1.
[ "Returns", "a", "ScalarPatternUniform", "object", "or", "a", "VectorPatternUniform", "object", "where", "each", "of", "the", "elements", "is", "set", "to", "a", "normal", "random", "variable", "with", "zero", "mean", "and", "unit", "standard", "deviation", ".", ...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/spherepy.py#L1625-L1680
rdireen/spherepy
spherepy/spherepy.py
double_sphere
def double_sphere(cdata, sym):
    """Return a copy of *cdata* forced to have double sphere symmetry.

    Each sample is averaged with its mirror image — the sample at row
    (nrows - n) mod nrows, column (m + ncols//2) mod ncols, scaled by
    *sym* — except that when one member of the pair is zero the nonzero
    member is kept as-is rather than halved.

    Example::

        >>> spherepy.double_sphere(cdata, 1)

    Args:
        cdata (numpy.ndarray): 2D complex data on the double sphere grid.
        sym (int): 1 for scalar data, -1 for vector data.

    Returns:
        numpy.ndarray of shape cdata.shape, dtype complex128, with double
        sphere symmetry.
    """
    nrows, ncols = cdata.shape
    half = ncols // 2

    # Mirror image of every sample:
    #   mirrored[n, m] = sym * cdata[(nrows - n) % nrows, (half + m) % ncols]
    # cdata[::-1] reverses rows, roll(..., 1, axis=0) pins row 0 in place,
    # and roll(..., -half, axis=1) shifts columns by half a turn in phi.
    # (The original double Python loop was O(nrows*ncols) interpreter-level
    # work and used Python-2-only xrange; this is the vectorized equivalent.)
    mirrored = sym * np.roll(np.roll(cdata[::-1, :], 1, axis=0),
                             -half, axis=1)

    total = (mirrored + cdata).astype(np.complex128)
    # Where one of the pair is zero, keep the sum (i.e. the nonzero value);
    # otherwise average the two.
    return np.where(mirrored * cdata == 0, total, total / 2)
python
def double_sphere(cdata, sym): """ Ensures that the data within cdata has double sphere symmetry. Example:: >>> spherepy.doublesphere(cdata, 1) Args: sym (int): is 1 for scalar data and -1 for vector data Returns: numpy.array([*,*], dtype=np.complex128) containing array with doublesphere symmetry. """ nrows = cdata.shape[0] ncols = cdata.shape[1] ddata = np.zeros([nrows, ncols], dtype=np.complex128) for n in xrange(0, nrows): for m in xrange(0, ncols): s = sym * cdata[np.mod(nrows - n, nrows), np.mod(int(np.floor(ncols / 2)) + m, ncols)] t = cdata[n, m] if s * t == 0: ddata[n, m] = s + t else: ddata[n, m] = (s + t) / 2 return ddata
[ "def", "double_sphere", "(", "cdata", ",", "sym", ")", ":", "nrows", "=", "cdata", ".", "shape", "[", "0", "]", "ncols", "=", "cdata", ".", "shape", "[", "1", "]", "ddata", "=", "np", ".", "zeros", "(", "[", "nrows", ",", "ncols", "]", ",", "dt...
Ensures that the data within cdata has double sphere symmetry. Example:: >>> spherepy.doublesphere(cdata, 1) Args: sym (int): is 1 for scalar data and -1 for vector data Returns: numpy.array([*,*], dtype=np.complex128) containing array with doublesphere symmetry.
[ "Ensures", "that", "the", "data", "within", "cdata", "has", "double", "sphere", "symmetry", ".", "Example", "::", ">>>", "spherepy", ".", "doublesphere", "(", "cdata", "1", ")", "Args", ":", "sym", "(", "int", ")", ":", "is", "1", "for", "scalar", "dat...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/spherepy.py#L1751-L1782
rdireen/spherepy
spherepy/spherepy.py
pretty_coefs
def pretty_coefs(c):
    """Pretty-print the first two modes of a ScalarCoefs object.

    Mostly used for instructional purposes: displays the coefficients for
    n = 0..2 arranged by degree n (rows) and order m (columns).

    Example::

        >>> spherepy.pretty_coefs(c)

                                      c[n, m]
                                      =======

    Args:
        c (ScalarCoefs): Coefficients to be printed.

    Returns:
        None; writes a formatted table to the console.
    """
    # Restrict to modes n <= 2 and read the packed coefficient vector.
    truncated = c[0:2, :]
    cells = [_tiny_rep(coef) for coef in truncated._vec]
    # The display template expects exactly 9 slots; pad with blanks.
    cells.extend([""] * (9 - len(cells)))
    centered = [entry.center(13) for entry in cells[:9]]
    print(pretty_display_string.format(*centered))
python
def pretty_coefs(c): """Prints out the first 2 modes of a ScalarCoeffs object. This is mostly used for instructional purposes. (*ScalarPatternUniform*) Example:: >>> spherepy.pretty_coefs(c) c[n, m] ======= 2: 0j 0j 0j 0j 0j 1: 1+0j 0j -1+0j 0: 1j n ------------- ------------- ------------- ------------- ------------- m = -2 m = -1 m = 0 m = 1 m = 2 Args: c (ScalarCoefs): Coefficients to be printed. Returns: nothing, just outputs something pretty to the console. """ cfit = c[0:2, :] cvec = cfit._vec sa = [_tiny_rep(val) for val in cvec] while len(sa) < 9: sa.append("") sa = [sa[n].center(13) for n in range(0, 9)] print(pretty_display_string.format(sa[0], sa[1], sa[2], sa[3], sa[4], sa[5], sa[6], sa[7], sa[8]))
[ "def", "pretty_coefs", "(", "c", ")", ":", "cfit", "=", "c", "[", "0", ":", "2", ",", ":", "]", "cvec", "=", "cfit", ".", "_vec", "sa", "=", "[", "_tiny_rep", "(", "val", ")", "for", "val", "in", "cvec", "]", "while", "len", "(", "sa", ")", ...
Prints out the first 2 modes of a ScalarCoeffs object. This is mostly used for instructional purposes. (*ScalarPatternUniform*) Example:: >>> spherepy.pretty_coefs(c) c[n, m] ======= 2: 0j 0j 0j 0j 0j 1: 1+0j 0j -1+0j 0: 1j n ------------- ------------- ------------- ------------- ------------- m = -2 m = -1 m = 0 m = 1 m = 2 Args: c (ScalarCoefs): Coefficients to be printed. Returns: nothing, just outputs something pretty to the console.
[ "Prints", "out", "the", "first", "2", "modes", "of", "a", "ScalarCoeffs", "object", ".", "This", "is", "mostly", "used", "for", "instructional", "purposes", ".", "(", "*", "ScalarPatternUniform", "*", ")", "Example", "::", ">>>", "spherepy", ".", "pretty_coe...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/spherepy.py#L1790-L1828
rdireen/spherepy
spherepy/spherepy.py
spht
def spht(ssphere, nmax=None, mmax=None):
    """Transforms ScalarPatternUniform object *ssphere* into a set of
    scalar spherical harmonics stored in ScalarCoefs.

    Example::

        >>> p = spherepy.random_patt_uniform(6, 8)
        >>> c = spherepy.spht(p)
        >>> spherepy.pretty_coefs(c)

    Args:
        ssphere (ScalarPatternUniform): The pattern to be transformed.
        nmax (int, optional): The maximum number of *n* values required. If
            a value isn't passed, *nmax* is the number of rows in ssphere
            minus two.
        mmax (int, optional): The maximum number of *m* values required. If
            a value isn't passed, *mmax* defaults to *nmax* (or, when nmax
            is also omitted, half the number of columns in ssphere minus
            one).

    Returns:
        ScalarCoefs: The object containing the coefficients of the scalar
        spherical harmonic transform.

    Raises:
        ValueError: If *nmax* and *mmax* are too large or *mmax* > *nmax*.
    """
    # Default band limits are the largest ones the sampling grid supports.
    if nmax == None:
        nmax = ssphere.nrows - 2
        mmax = int(ssphere.ncols / 2) - 1
    elif mmax == None:
        mmax = nmax

    # Validate band limits against the grid before doing any work.
    if mmax > nmax:
        raise ValueError(err_msg['nmax_g_mmax'])
    if nmax >= ssphere.nrows - 1:
        raise ValueError(err_msg['nmax_too_lrg'])
    if mmax >= ssphere.ncols / 2:
        raise ValueError(err_msg['mmax_too_lrg'])

    # _dsphere holds the double-sphere extension of the pattern data.
    dnrows = ssphere._dsphere.shape[0]
    ncols = ssphere._dsphere.shape[1]

    if np.mod(ncols, 2) == 1:
        raise ValueError(err_msg['ncols_even'])

    # 2D Fourier coefficients of the double-sphere data, normalized so the
    # forward FFT carries the 1/(rows*cols) factor.
    fdata = np.fft.fft2(ssphere._dsphere) / (dnrows * ncols)

    # Adjustments to the Fourier data before the harmonic projection.
    # NOTE(review): the exact effect of these ops helpers isn't visible
    # here — fix_even_row_data_fc presumably corrects the Nyquist row for
    # even-length FFTs, pad_rows_fdata zero-pads two extra rows, and
    # sin_fc applies a sin(theta)-related weighting in the Fourier domain;
    # confirm against the ops module.
    ops.fix_even_row_data_fc(fdata)

    fdata_extended = np.zeros([dnrows + 2, ncols], dtype=np.complex128)
    ops.pad_rows_fdata(fdata, fdata_extended)

    ops.sin_fc(fdata_extended)

    # NC is the packed length of the coefficient vector for the given
    # (nmax, mmax) band limits.
    N = nmax + 1;
    NC = N + mmax * (2 * N - mmax - 1);

    sc = np.zeros(NC, dtype=np.complex128)

    # check if we are using c extended versions of the code or not
    if use_cext:
        # C extension fills the preallocated sc in place.
        csphi.fc_to_sc(fdata_extended, sc, nmax, mmax)
    else:
        # Pure-Python fallback returns a new coefficient vector.
        sc = pysphi.fc_to_sc(fdata_extended, nmax, mmax)

    return ScalarCoefs(sc, nmax, mmax)
python
def spht(ssphere, nmax=None, mmax=None): """Transforms ScalarPatternUniform object *ssphere* into a set of scalar spherical harmonics stored in ScalarCoefs. Example:: >>> p = spherepy.random_patt_uniform(6, 8) >>> c = spherepy.spht(p) >>> spherepy.pretty_coefs(c) Args: ssphere (ScalarPatternUniform): The pattern to be transformed. nmax (int, optional): The maximum number of *n* values required. If a value isn't passed, *nmax* is the number of rows in ssphere minus one. mmax (int, optional): The maximum number of *m* values required. If a value isn't passed, *mmax* is half the number of columns in ssphere minus one. Returns: ScalarCoefs: The object containing the coefficients of the scalar spherical harmonic transform. Raises: ValueError: If *nmax* and *mmax* are too large or *mmax* > *nmax*. """ if nmax == None: nmax = ssphere.nrows - 2 mmax = int(ssphere.ncols / 2) - 1 elif mmax == None: mmax = nmax if mmax > nmax: raise ValueError(err_msg['nmax_g_mmax']) if nmax >= ssphere.nrows - 1: raise ValueError(err_msg['nmax_too_lrg']) if mmax >= ssphere.ncols / 2: raise ValueError(err_msg['mmax_too_lrg']) dnrows = ssphere._dsphere.shape[0] ncols = ssphere._dsphere.shape[1] if np.mod(ncols, 2) == 1: raise ValueError(err_msg['ncols_even']) fdata = np.fft.fft2(ssphere._dsphere) / (dnrows * ncols) ops.fix_even_row_data_fc(fdata) fdata_extended = np.zeros([dnrows + 2, ncols], dtype=np.complex128) ops.pad_rows_fdata(fdata, fdata_extended) ops.sin_fc(fdata_extended) N = nmax + 1; NC = N + mmax * (2 * N - mmax - 1); sc = np.zeros(NC, dtype=np.complex128) # check if we are using c extended versions of the code or not if use_cext: csphi.fc_to_sc(fdata_extended, sc, nmax, mmax) else: sc = pysphi.fc_to_sc(fdata_extended, nmax, mmax) return ScalarCoefs(sc, nmax, mmax)
[ "def", "spht", "(", "ssphere", ",", "nmax", "=", "None", ",", "mmax", "=", "None", ")", ":", "if", "nmax", "==", "None", ":", "nmax", "=", "ssphere", ".", "nrows", "-", "2", "mmax", "=", "int", "(", "ssphere", ".", "ncols", "/", "2", ")", "-", ...
Transforms ScalarPatternUniform object *ssphere* into a set of scalar spherical harmonics stored in ScalarCoefs. Example:: >>> p = spherepy.random_patt_uniform(6, 8) >>> c = spherepy.spht(p) >>> spherepy.pretty_coefs(c) Args: ssphere (ScalarPatternUniform): The pattern to be transformed. nmax (int, optional): The maximum number of *n* values required. If a value isn't passed, *nmax* is the number of rows in ssphere minus one. mmax (int, optional): The maximum number of *m* values required. If a value isn't passed, *mmax* is half the number of columns in ssphere minus one. Returns: ScalarCoefs: The object containing the coefficients of the scalar spherical harmonic transform. Raises: ValueError: If *nmax* and *mmax* are too large or *mmax* > *nmax*.
[ "Transforms", "ScalarPatternUniform", "object", "*", "ssphere", "*", "into", "a", "set", "of", "scalar", "spherical", "harmonics", "stored", "in", "ScalarCoefs", ".", "Example", "::", ">>>", "p", "=", "spherepy", ".", "random_patt_uniform", "(", "6", "8", ")",...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/spherepy.py#L1831-L1900