code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def dispatch(self, event: Any) -> None:
    """Send an event to an `ev_*` method.

    `*` will be the events type converted to lower-case.

    If `event.type` is an empty string or None then it will be ignored.
    """
    if event.type:
        # Resolve the handler by name, e.g. type "KEYDOWN" -> self.ev_keydown.
        # A missing handler raises AttributeError, same as before.
        getattr(self, f"ev_{event.type.lower()}")(event)
constant[Send an event to an `ev_*` method.
`*` will be the events type converted to lower-case.
If `event.type` is an empty string or None then it will be ignored.
]
if name[event].type begin[:]
call[call[name[getattr], parameter[name[self], binary_operation[constant[ev_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18f58fd00>]]]]], parameter[name[event]]] | keyword[def] identifier[dispatch] ( identifier[self] , identifier[event] : identifier[Any] )-> keyword[None] :
literal[string]
keyword[if] identifier[event] . identifier[type] :
identifier[getattr] ( identifier[self] , literal[string] %( identifier[event] . identifier[type] . identifier[lower] (),))( identifier[event] ) | def dispatch(self, event: Any) -> None:
"""Send an event to an `ev_*` method.
`*` will be the events type converted to lower-case.
If `event.type` is an empty string or None then it will be ignored.
"""
if event.type:
getattr(self, 'ev_%s' % (event.type.lower(),))(event) # depends on [control=['if'], data=[]] |
def angles(triangles):
    """
    Calculates the angles of input triangles.

    Parameters
    ------------
    triangles : (n, 3, 3) float
      Vertex positions

    Returns
    ------------
    angles : (n, 3) float
      Angles at vertex positions, in radians
    """
    # work on a float64 view/copy: the in-place `/=` normalization below
    # raises on integer-dtype input, and this also accepts plain lists
    triangles = np.asanyarray(triangles, dtype=np.float64)
    # get a vector for each edge of the triangle
    u = triangles[:, 1] - triangles[:, 0]
    v = triangles[:, 2] - triangles[:, 0]
    w = triangles[:, 2] - triangles[:, 1]
    # normalize each edge vector in place (u, v, w are fresh arrays)
    u /= np.linalg.norm(u, axis=1, keepdims=True)
    v /= np.linalg.norm(v, axis=1, keepdims=True)
    w /= np.linalg.norm(w, axis=1, keepdims=True)
    # the angle at a vertex is arccos of the dot product of the two unit
    # edge vectors meeting there; the einsum is a row-wise dot product and
    # clip guards arccos against floating-point values just outside [-1, 1]
    a = np.arccos(np.clip(np.einsum('ij, ij->i', u, v), -1, 1))
    b = np.arccos(np.clip(np.einsum('ij, ij->i', -u, w), -1, 1))
    # interior angles sum to pi, so the third angle comes for free
    c = np.pi - a - b
    return np.column_stack([a, b, c])
constant[
Calculates the angles of input triangles.
Parameters
------------
triangles : (n, 3, 3) float
Vertex positions
Returns
------------
angles : (n, 3) float
Angles at vertex positions, in radians
]
variable[u] assign[=] binary_operation[call[name[triangles]][tuple[[<ast.Slice object at 0x7da1b23c31c0>, <ast.Constant object at 0x7da1b23c3250>]]] - call[name[triangles]][tuple[[<ast.Slice object at 0x7da1b23c32b0>, <ast.Constant object at 0x7da1b23c5030>]]]]
variable[v] assign[=] binary_operation[call[name[triangles]][tuple[[<ast.Slice object at 0x7da1b23c5540>, <ast.Constant object at 0x7da1b23c4160>]]] - call[name[triangles]][tuple[[<ast.Slice object at 0x7da1b23c5420>, <ast.Constant object at 0x7da1b23c4c70>]]]]
variable[w] assign[=] binary_operation[call[name[triangles]][tuple[[<ast.Slice object at 0x7da1b23c5db0>, <ast.Constant object at 0x7da1b23c6530>]]] - call[name[triangles]][tuple[[<ast.Slice object at 0x7da1b23c43a0>, <ast.Constant object at 0x7da1b23c66e0>]]]]
<ast.AugAssign object at 0x7da1b23c4c40>
<ast.AugAssign object at 0x7da1b23c5ae0>
<ast.AugAssign object at 0x7da1b23c4430>
variable[a] assign[=] call[name[np].arccos, parameter[call[name[np].clip, parameter[call[name[np].einsum, parameter[constant[ij, ij->i], name[u], name[v]]], <ast.UnaryOp object at 0x7da1b23c4250>, constant[1]]]]]
variable[b] assign[=] call[name[np].arccos, parameter[call[name[np].clip, parameter[call[name[np].einsum, parameter[constant[ij, ij->i], <ast.UnaryOp object at 0x7da1b23c5390>, name[w]]], <ast.UnaryOp object at 0x7da1b23c5720>, constant[1]]]]]
variable[c] assign[=] binary_operation[binary_operation[name[np].pi - name[a]] - name[b]]
return[call[name[np].column_stack, parameter[list[[<ast.Name object at 0x7da1b23c5270>, <ast.Name object at 0x7da1b23c6410>, <ast.Name object at 0x7da1b23c4670>]]]]] | keyword[def] identifier[angles] ( identifier[triangles] ):
literal[string]
identifier[u] = identifier[triangles] [:, literal[int] ]- identifier[triangles] [:, literal[int] ]
identifier[v] = identifier[triangles] [:, literal[int] ]- identifier[triangles] [:, literal[int] ]
identifier[w] = identifier[triangles] [:, literal[int] ]- identifier[triangles] [:, literal[int] ]
identifier[u] /= identifier[np] . identifier[linalg] . identifier[norm] ( identifier[u] , identifier[axis] = literal[int] , identifier[keepdims] = keyword[True] )
identifier[v] /= identifier[np] . identifier[linalg] . identifier[norm] ( identifier[v] , identifier[axis] = literal[int] , identifier[keepdims] = keyword[True] )
identifier[w] /= identifier[np] . identifier[linalg] . identifier[norm] ( identifier[w] , identifier[axis] = literal[int] , identifier[keepdims] = keyword[True] )
identifier[a] = identifier[np] . identifier[arccos] ( identifier[np] . identifier[clip] ( identifier[np] . identifier[einsum] ( literal[string] , identifier[u] , identifier[v] ),- literal[int] , literal[int] ))
identifier[b] = identifier[np] . identifier[arccos] ( identifier[np] . identifier[clip] ( identifier[np] . identifier[einsum] ( literal[string] ,- identifier[u] , identifier[w] ),- literal[int] , literal[int] ))
identifier[c] = identifier[np] . identifier[pi] - identifier[a] - identifier[b]
keyword[return] identifier[np] . identifier[column_stack] ([ identifier[a] , identifier[b] , identifier[c] ]) | def angles(triangles):
"""
Calculates the angles of input triangles.
Parameters
------------
triangles : (n, 3, 3) float
Vertex positions
Returns
------------
angles : (n, 3) float
Angles at vertex positions, in radians
"""
# get a vector for each edge of the triangle
u = triangles[:, 1] - triangles[:, 0]
v = triangles[:, 2] - triangles[:, 0]
w = triangles[:, 2] - triangles[:, 1]
# normalize each vector in place
u /= np.linalg.norm(u, axis=1, keepdims=True)
v /= np.linalg.norm(v, axis=1, keepdims=True)
w /= np.linalg.norm(w, axis=1, keepdims=True)
# run the cosine and an einsum that definitely does something
a = np.arccos(np.clip(np.einsum('ij, ij->i', u, v), -1, 1))
b = np.arccos(np.clip(np.einsum('ij, ij->i', -u, w), -1, 1))
c = np.pi - a - b
return np.column_stack([a, b, c]) |
def top_sort(graph):
    """Iterative topological sort of a directed graph.

    ``graph`` maps each node to an iterable of its successor nodes.
    Returns a list of nodes in topological order and raises
    ``ValueError("cycle")`` if the graph contains a cycle.

    Time complexity is the same as DFS, which is O(V + E)
    Space complexity: O(V)
    """
    # order: finished nodes (the result); enter: nodes not yet visited;
    # state: node -> color (GRAY = on the current DFS path, BLACK = done).
    # NOTE(review): GRAY and BLACK are module-level markers defined
    # elsewhere in this file.
    order, enter, state = [], set(graph), {}

    def is_ready(node):
        # A node is "ready" (may be emitted) once every successor is BLACK.
        # A GRAY successor means a back edge, i.e. a cycle.
        lst = graph.get(node, ())
        if len(lst) == 0:
            return True
        for k in lst:
            sk = state.get(k, None)
            if sk == GRAY:
                raise ValueError("cycle")
            if sk != BLACK:
                return False
        return True

    # Restart a DFS from every unvisited node (the graph may be disconnected).
    while enter:
        node = enter.pop()
        stack = []
        while True:
            # Push the current node onto the explicit DFS stack.
            state[node] = GRAY
            stack.append(node)
            # Push unfinished successors; a GRAY successor is a cycle.
            for k in graph.get(node, ()):
                sk = state.get(k, None)
                if sk == GRAY:
                    raise ValueError("cycle")
                if sk == BLACK:
                    continue
                enter.discard(k)
                stack.append(k)
            # Pop every node whose successors are all finished and emit it.
            while stack and is_ready(stack[-1]):
                node = stack.pop()
                order.append(node)
                state[node] = BLACK
            if len(stack) == 0:
                break
            # Continue the DFS from the deepest unfinished node.
            node = stack.pop()
    return order
constant[ Time complexity is the same as DFS, which is O(V + E)
Space complexity: O(V)
]
<ast.Tuple object at 0x7da1b20ba8c0> assign[=] tuple[[<ast.List object at 0x7da1b20b9fc0>, <ast.Call object at 0x7da1b20bad40>, <ast.Dict object at 0x7da1b20b94e0>]]
def function[is_ready, parameter[node]]:
variable[lst] assign[=] call[name[graph].get, parameter[name[node], tuple[[]]]]
if compare[call[name[len], parameter[name[lst]]] equal[==] constant[0]] begin[:]
return[constant[True]]
for taget[name[k]] in starred[name[lst]] begin[:]
variable[sk] assign[=] call[name[state].get, parameter[name[k], constant[None]]]
if compare[name[sk] equal[==] name[GRAY]] begin[:]
<ast.Raise object at 0x7da1b20b8760>
if compare[name[sk] not_equal[!=] name[BLACK]] begin[:]
return[constant[False]]
return[constant[True]]
while name[enter] begin[:]
variable[node] assign[=] call[name[enter].pop, parameter[]]
variable[stack] assign[=] list[[]]
while constant[True] begin[:]
call[name[state]][name[node]] assign[=] name[GRAY]
call[name[stack].append, parameter[name[node]]]
for taget[name[k]] in starred[call[name[graph].get, parameter[name[node], tuple[[]]]]] begin[:]
variable[sk] assign[=] call[name[state].get, parameter[name[k], constant[None]]]
if compare[name[sk] equal[==] name[GRAY]] begin[:]
<ast.Raise object at 0x7da1b1eeb8b0>
if compare[name[sk] equal[==] name[BLACK]] begin[:]
continue
call[name[enter].discard, parameter[name[k]]]
call[name[stack].append, parameter[name[k]]]
while <ast.BoolOp object at 0x7da1b1eea680> begin[:]
variable[node] assign[=] call[name[stack].pop, parameter[]]
call[name[order].append, parameter[name[node]]]
call[name[state]][name[node]] assign[=] name[BLACK]
if compare[call[name[len], parameter[name[stack]]] equal[==] constant[0]] begin[:]
break
variable[node] assign[=] call[name[stack].pop, parameter[]]
return[name[order]] | keyword[def] identifier[top_sort] ( identifier[graph] ):
literal[string]
identifier[order] , identifier[enter] , identifier[state] =[], identifier[set] ( identifier[graph] ),{}
keyword[def] identifier[is_ready] ( identifier[node] ):
identifier[lst] = identifier[graph] . identifier[get] ( identifier[node] ,())
keyword[if] identifier[len] ( identifier[lst] )== literal[int] :
keyword[return] keyword[True]
keyword[for] identifier[k] keyword[in] identifier[lst] :
identifier[sk] = identifier[state] . identifier[get] ( identifier[k] , keyword[None] )
keyword[if] identifier[sk] == identifier[GRAY] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[sk] != identifier[BLACK] :
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[while] identifier[enter] :
identifier[node] = identifier[enter] . identifier[pop] ()
identifier[stack] =[]
keyword[while] keyword[True] :
identifier[state] [ identifier[node] ]= identifier[GRAY]
identifier[stack] . identifier[append] ( identifier[node] )
keyword[for] identifier[k] keyword[in] identifier[graph] . identifier[get] ( identifier[node] ,()):
identifier[sk] = identifier[state] . identifier[get] ( identifier[k] , keyword[None] )
keyword[if] identifier[sk] == identifier[GRAY] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[sk] == identifier[BLACK] :
keyword[continue]
identifier[enter] . identifier[discard] ( identifier[k] )
identifier[stack] . identifier[append] ( identifier[k] )
keyword[while] identifier[stack] keyword[and] identifier[is_ready] ( identifier[stack] [- literal[int] ]):
identifier[node] = identifier[stack] . identifier[pop] ()
identifier[order] . identifier[append] ( identifier[node] )
identifier[state] [ identifier[node] ]= identifier[BLACK]
keyword[if] identifier[len] ( identifier[stack] )== literal[int] :
keyword[break]
identifier[node] = identifier[stack] . identifier[pop] ()
keyword[return] identifier[order] | def top_sort(graph):
""" Time complexity is the same as DFS, which is O(V + E)
Space complexity: O(V)
"""
(order, enter, state) = ([], set(graph), {})
def is_ready(node):
lst = graph.get(node, ())
if len(lst) == 0:
return True # depends on [control=['if'], data=[]]
for k in lst:
sk = state.get(k, None)
if sk == GRAY:
raise ValueError('cycle') # depends on [control=['if'], data=[]]
if sk != BLACK:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
return True
while enter:
node = enter.pop()
stack = []
while True:
state[node] = GRAY
stack.append(node)
for k in graph.get(node, ()):
sk = state.get(k, None)
if sk == GRAY:
raise ValueError('cycle') # depends on [control=['if'], data=[]]
if sk == BLACK:
continue # depends on [control=['if'], data=[]]
enter.discard(k)
stack.append(k) # depends on [control=['for'], data=['k']]
while stack and is_ready(stack[-1]):
node = stack.pop()
order.append(node)
state[node] = BLACK # depends on [control=['while'], data=[]]
if len(stack) == 0:
break # depends on [control=['if'], data=[]]
node = stack.pop() # depends on [control=['while'], data=[]] # depends on [control=['while'], data=[]]
return order |
def handle_request(self):
    """Accept one incoming request and enqueue it for the worker pool."""
    try:
        conn, addr = self.get_request()
    except socket.error:
        # Accept failed; nothing to enqueue.
        return
    if not self.verify_request(conn, addr):
        return
    self.requests.put((conn, addr))
constant[simply collect requests and put them on the queue for the workers.]
<ast.Try object at 0x7da18ede4880>
if call[name[self].verify_request, parameter[name[request], name[client_address]]] begin[:]
call[name[self].requests.put, parameter[tuple[[<ast.Name object at 0x7da20e9b0d00>, <ast.Name object at 0x7da20e9b2560>]]]] | keyword[def] identifier[handle_request] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[request] , identifier[client_address] = identifier[self] . identifier[get_request] ()
keyword[except] identifier[socket] . identifier[error] :
keyword[return]
keyword[if] identifier[self] . identifier[verify_request] ( identifier[request] , identifier[client_address] ):
identifier[self] . identifier[requests] . identifier[put] (( identifier[request] , identifier[client_address] )) | def handle_request(self):
"""simply collect requests and put them on the queue for the workers."""
try:
(request, client_address) = self.get_request() # depends on [control=['try'], data=[]]
except socket.error:
return # depends on [control=['except'], data=[]]
if self.verify_request(request, client_address):
self.requests.put((request, client_address)) # depends on [control=['if'], data=[]] |
def barmatch2(data, tups, cutters, longbar, matchdict, fnum):
    """
    Demultiplex the fastq file (pair) in `tups` by matching inline barcodes
    against `matchdict`, buffering reads per sample and flushing buffers to
    disk every `waitchunk` reads. Stats are pickled to a tmp file (the
    return queue is too small for the match dictionary) and the pickle's
    path is returned.
    """
    ## how many reads to store before writing to disk
    waitchunk = int(1e6)
    ## pid name for this engine
    epid = os.getpid()
    ## counters for total reads, those with cutsite, and those that matched
    filestat = np.zeros(3, dtype=np.int)

    ## store observed sample matches
    samplehits = {}
    ## dictionaries to store first and second reads until writing to file
    dsort1 = {}
    dsort2 = {}
    ## dictionary for all bars matched in sample
    dbars = {}
    ## fill for sample names; technical replicates are merged under the
    ## base sample name
    for sname in data.barcodes:
        if "-technical-replicate-" in sname:
            sname = sname.rsplit("-technical-replicate", 1)[0]
        samplehits[sname] = 0
        dsort1[sname] = []
        dsort2[sname] = []
        dbars[sname] = set()

    ## store observed bars
    barhits = {}
    for barc in matchdict:
        barhits[barc] = 0

    ## store others
    misses = {}
    misses['_'] = 0

    ## build func for finding barcode
    getbarcode = get_barcode_func(data, longbar)

    ## get quart iterator of reads
    if tups[0].endswith(".gz"):
        ofunc = gzip.open
    else:
        ofunc = open

    ## create iterators yielding 4-line fastq records
    ofile1 = ofunc(tups[0], 'r')
    fr1 = iter(ofile1)
    quart1 = itertools.izip(fr1, fr1, fr1, fr1)
    if tups[1]:
        ofile2 = ofunc(tups[1], 'r')
        fr2 = iter(ofile2)
        quart2 = itertools.izip(fr2, fr2, fr2, fr2)
        quarts = itertools.izip(quart1, quart2)
    else:
        ## single-end: pair each quartet with a dummy endless iterator
        quarts = itertools.izip(quart1, iter(int, 1))

    ## go until end of the file
    while 1:
        try:
            read1, read2 = quarts.next()
            read1 = list(read1)
            filestat[0] += 1
        except StopIteration:
            break

        barcode = ""
        ## Get barcode_R2 and check for matching sample name
        if '3rad' in data.paramsdict["datatype"]:
            ## Here we're just reusing the findbcode function
            ## for R2, and reconfiguring the longbar tuple to have the
            ## maxlen for the R2 barcode
            ## Parse barcode. Use the parsing function selected above.
            barcode1 = find3radbcode(cutters=cutters,
                                     longbar=longbar, read1=read1)
            barcode2 = find3radbcode(cutters=cutters,
                                     longbar=(longbar[2], longbar[1]), read1=read2)
            barcode = barcode1 + "+" + barcode2
        else:
            ## Parse barcode. Uses the parsing function selected above.
            barcode = getbarcode(cutters, read1, longbar)

        ## find if it matches
        sname_match = matchdict.get(barcode)

        if sname_match:
            ## record who matched
            dbars[sname_match].add(barcode)
            filestat[1] += 1
            filestat[2] += 1
            samplehits[sname_match] += 1
            ## BUGFIX: barhits was previously incremented twice per match
            ## (an unconditional increment followed by a redundant
            ## membership check that incremented again). Every matched
            ## barcode is already a key (initialized above), so count once.
            barhits[barcode] += 1

            ## trim off barcode
            lenbar = len(barcode)
            if '3rad' in data.paramsdict["datatype"]:
                ## Iff 3rad trim the len of the first barcode
                lenbar = len(barcode1)

            if data.paramsdict["datatype"] == '2brad':
                ## 2brad cuts the barcode + cutsite off the 3' end instead
                overlen = len(cutters[0][0]) + lenbar + 1
                read1[1] = read1[1][:-overlen] + "\n"
                read1[3] = read1[3][:-overlen] + "\n"
            else:
                read1[1] = read1[1][lenbar:]
                read1[3] = read1[3][lenbar:]

            ## Trim barcode off R2 and append. Only 3rad datatype
            ## pays the cpu cost of splitting R2
            if '3rad' in data.paramsdict["datatype"]:
                read2 = list(read2)
                read2[1] = read2[1][len(barcode2):]
                read2[3] = read2[3][len(barcode2):]

            ## append to dsort
            dsort1[sname_match].append("".join(read1))
            if 'pair' in data.paramsdict["datatype"]:
                dsort2[sname_match].append("".join(read2))
        else:
            misses["_"] += 1
            if barcode:
                filestat[1] += 1

        ## how can we make it so all of the engines aren't trying to write to
        ## ~100-200 files all at the same time? This is the I/O limit we hit..
        ## write out at 100K to keep memory low. It is fine on HPC which can
        ## write parallel, but regular systems might crash
        if not filestat[0] % waitchunk:
            ## write the buffered reads to file
            writetofile(data, dsort1, 1, epid)
            if 'pair' in data.paramsdict["datatype"]:
                writetofile(data, dsort2, 2, epid)
            ## clear out dsorts
            ## BUGFIX: this loop previously tested/stripped the stale
            ## variable `sname` (left over from the setup loop) instead of
            ## `sample`, so only one sample's buffer was ever cleared and
            ## already-written reads were duplicated on the next flush.
            for sample in data.barcodes:
                if "-technical-replicate-" in sample:
                    sample = sample.rsplit("-technical-replicate", 1)[0]
                dsort1[sample] = []
                dsort2[sample] = []

    ## close open files
    ofile1.close()
    if tups[1]:
        ofile2.close()

    ## write the remaining reads to file
    writetofile(data, dsort1, 1, epid)
    if 'pair' in data.paramsdict["datatype"]:
        writetofile(data, dsort2, 2, epid)

    ## return stats in saved pickle b/c return_queue is too small
    ## and the size of the match dictionary can become quite large
    samplestats = [samplehits, barhits, misses, dbars]
    outname = os.path.join(data.dirs.fastqs, "tmp_{}_{}.p".format(epid, fnum))
    with open(outname, 'w') as wout:
        pickle.dump([filestat, samplestats], wout)

    return outname
constant[
cleaner barmatch func...
]
variable[waitchunk] assign[=] call[name[int], parameter[constant[1000000.0]]]
variable[epid] assign[=] call[name[os].getpid, parameter[]]
variable[filestat] assign[=] call[name[np].zeros, parameter[constant[3]]]
variable[samplehits] assign[=] dictionary[[], []]
variable[dsort1] assign[=] dictionary[[], []]
variable[dsort2] assign[=] dictionary[[], []]
variable[dbars] assign[=] dictionary[[], []]
for taget[name[sname]] in starred[name[data].barcodes] begin[:]
if compare[constant[-technical-replicate-] in name[sname]] begin[:]
variable[sname] assign[=] call[call[name[sname].rsplit, parameter[constant[-technical-replicate], constant[1]]]][constant[0]]
call[name[samplehits]][name[sname]] assign[=] constant[0]
call[name[dsort1]][name[sname]] assign[=] list[[]]
call[name[dsort2]][name[sname]] assign[=] list[[]]
call[name[dbars]][name[sname]] assign[=] call[name[set], parameter[]]
variable[barhits] assign[=] dictionary[[], []]
for taget[name[barc]] in starred[name[matchdict]] begin[:]
call[name[barhits]][name[barc]] assign[=] constant[0]
variable[misses] assign[=] dictionary[[], []]
call[name[misses]][constant[_]] assign[=] constant[0]
variable[getbarcode] assign[=] call[name[get_barcode_func], parameter[name[data], name[longbar]]]
if call[call[name[tups]][constant[0]].endswith, parameter[constant[.gz]]] begin[:]
variable[ofunc] assign[=] name[gzip].open
variable[ofile1] assign[=] call[name[ofunc], parameter[call[name[tups]][constant[0]], constant[r]]]
variable[fr1] assign[=] call[name[iter], parameter[name[ofile1]]]
variable[quart1] assign[=] call[name[itertools].izip, parameter[name[fr1], name[fr1], name[fr1], name[fr1]]]
if call[name[tups]][constant[1]] begin[:]
variable[ofile2] assign[=] call[name[ofunc], parameter[call[name[tups]][constant[1]], constant[r]]]
variable[fr2] assign[=] call[name[iter], parameter[name[ofile2]]]
variable[quart2] assign[=] call[name[itertools].izip, parameter[name[fr2], name[fr2], name[fr2], name[fr2]]]
variable[quarts] assign[=] call[name[itertools].izip, parameter[name[quart1], name[quart2]]]
while constant[1] begin[:]
<ast.Try object at 0x7da20e9b07c0>
variable[barcode] assign[=] constant[]
if compare[constant[3rad] in call[name[data].paramsdict][constant[datatype]]] begin[:]
variable[barcode1] assign[=] call[name[find3radbcode], parameter[]]
variable[barcode2] assign[=] call[name[find3radbcode], parameter[]]
variable[barcode] assign[=] binary_operation[binary_operation[name[barcode1] + constant[+]] + name[barcode2]]
variable[sname_match] assign[=] call[name[matchdict].get, parameter[name[barcode]]]
if name[sname_match] begin[:]
call[call[name[dbars]][name[sname_match]].add, parameter[name[barcode]]]
<ast.AugAssign object at 0x7da1b007df30>
<ast.AugAssign object at 0x7da1b007e260>
<ast.AugAssign object at 0x7da1b007c580>
<ast.AugAssign object at 0x7da1b007d630>
if compare[name[barcode] in name[barhits]] begin[:]
<ast.AugAssign object at 0x7da1b007c250>
variable[lenbar] assign[=] call[name[len], parameter[name[barcode]]]
if compare[constant[3rad] in call[name[data].paramsdict][constant[datatype]]] begin[:]
variable[lenbar] assign[=] call[name[len], parameter[name[barcode1]]]
if compare[call[name[data].paramsdict][constant[datatype]] equal[==] constant[2brad]] begin[:]
variable[overlen] assign[=] binary_operation[binary_operation[call[name[len], parameter[call[call[name[cutters]][constant[0]]][constant[0]]]] + name[lenbar]] + constant[1]]
call[name[read1]][constant[1]] assign[=] binary_operation[call[call[name[read1]][constant[1]]][<ast.Slice object at 0x7da1b007fdf0>] + constant[
]]
call[name[read1]][constant[3]] assign[=] binary_operation[call[call[name[read1]][constant[3]]][<ast.Slice object at 0x7da1b007d900>] + constant[
]]
if compare[constant[3rad] in call[name[data].paramsdict][constant[datatype]]] begin[:]
variable[read2] assign[=] call[name[list], parameter[name[read2]]]
call[name[read2]][constant[1]] assign[=] call[call[name[read2]][constant[1]]][<ast.Slice object at 0x7da1b007c550>]
call[name[read2]][constant[3]] assign[=] call[call[name[read2]][constant[3]]][<ast.Slice object at 0x7da1b007fa30>]
call[call[name[dsort1]][name[sname_match]].append, parameter[call[constant[].join, parameter[name[read1]]]]]
if compare[constant[pair] in call[name[data].paramsdict][constant[datatype]]] begin[:]
call[call[name[dsort2]][name[sname_match]].append, parameter[call[constant[].join, parameter[name[read2]]]]]
if <ast.UnaryOp object at 0x7da207f986d0> begin[:]
call[name[writetofile], parameter[name[data], name[dsort1], constant[1], name[epid]]]
if compare[constant[pair] in call[name[data].paramsdict][constant[datatype]]] begin[:]
call[name[writetofile], parameter[name[data], name[dsort2], constant[2], name[epid]]]
for taget[name[sample]] in starred[name[data].barcodes] begin[:]
if compare[constant[-technical-replicate-] in name[sname]] begin[:]
variable[sname] assign[=] call[call[name[sname].rsplit, parameter[constant[-technical-replicate], constant[1]]]][constant[0]]
call[name[dsort1]][name[sname]] assign[=] list[[]]
call[name[dsort2]][name[sname]] assign[=] list[[]]
call[name[ofile1].close, parameter[]]
if call[name[tups]][constant[1]] begin[:]
call[name[ofile2].close, parameter[]]
call[name[writetofile], parameter[name[data], name[dsort1], constant[1], name[epid]]]
if compare[constant[pair] in call[name[data].paramsdict][constant[datatype]]] begin[:]
call[name[writetofile], parameter[name[data], name[dsort2], constant[2], name[epid]]]
variable[samplestats] assign[=] list[[<ast.Name object at 0x7da207f98e20>, <ast.Name object at 0x7da207f99390>, <ast.Name object at 0x7da207f9a1d0>, <ast.Name object at 0x7da207f98b20>]]
variable[outname] assign[=] call[name[os].path.join, parameter[name[data].dirs.fastqs, call[constant[tmp_{}_{}.p].format, parameter[name[epid], name[fnum]]]]]
with call[name[open], parameter[name[outname], constant[w]]] begin[:]
call[name[pickle].dump, parameter[list[[<ast.Name object at 0x7da207f983a0>, <ast.Name object at 0x7da207f98370>]], name[wout]]]
return[name[outname]] | keyword[def] identifier[barmatch2] ( identifier[data] , identifier[tups] , identifier[cutters] , identifier[longbar] , identifier[matchdict] , identifier[fnum] ):
literal[string]
identifier[waitchunk] = identifier[int] ( literal[int] )
identifier[epid] = identifier[os] . identifier[getpid] ()
identifier[filestat] = identifier[np] . identifier[zeros] ( literal[int] , identifier[dtype] = identifier[np] . identifier[int] )
identifier[samplehits] ={}
identifier[dsort1] ={}
identifier[dsort2] ={}
identifier[dbars] ={}
keyword[for] identifier[sname] keyword[in] identifier[data] . identifier[barcodes] :
keyword[if] literal[string] keyword[in] identifier[sname] :
identifier[sname] = identifier[sname] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[samplehits] [ identifier[sname] ]= literal[int]
identifier[dsort1] [ identifier[sname] ]=[]
identifier[dsort2] [ identifier[sname] ]=[]
identifier[dbars] [ identifier[sname] ]= identifier[set] ()
identifier[barhits] ={}
keyword[for] identifier[barc] keyword[in] identifier[matchdict] :
identifier[barhits] [ identifier[barc] ]= literal[int]
identifier[misses] ={}
identifier[misses] [ literal[string] ]= literal[int]
identifier[getbarcode] = identifier[get_barcode_func] ( identifier[data] , identifier[longbar] )
keyword[if] identifier[tups] [ literal[int] ]. identifier[endswith] ( literal[string] ):
identifier[ofunc] = identifier[gzip] . identifier[open]
keyword[else] :
identifier[ofunc] = identifier[open]
identifier[ofile1] = identifier[ofunc] ( identifier[tups] [ literal[int] ], literal[string] )
identifier[fr1] = identifier[iter] ( identifier[ofile1] )
identifier[quart1] = identifier[itertools] . identifier[izip] ( identifier[fr1] , identifier[fr1] , identifier[fr1] , identifier[fr1] )
keyword[if] identifier[tups] [ literal[int] ]:
identifier[ofile2] = identifier[ofunc] ( identifier[tups] [ literal[int] ], literal[string] )
identifier[fr2] = identifier[iter] ( identifier[ofile2] )
identifier[quart2] = identifier[itertools] . identifier[izip] ( identifier[fr2] , identifier[fr2] , identifier[fr2] , identifier[fr2] )
identifier[quarts] = identifier[itertools] . identifier[izip] ( identifier[quart1] , identifier[quart2] )
keyword[else] :
identifier[quarts] = identifier[itertools] . identifier[izip] ( identifier[quart1] , identifier[iter] ( identifier[int] , literal[int] ))
keyword[while] literal[int] :
keyword[try] :
identifier[read1] , identifier[read2] = identifier[quarts] . identifier[next] ()
identifier[read1] = identifier[list] ( identifier[read1] )
identifier[filestat] [ literal[int] ]+= literal[int]
keyword[except] identifier[StopIteration] :
keyword[break]
identifier[barcode] = literal[string]
keyword[if] literal[string] keyword[in] identifier[data] . identifier[paramsdict] [ literal[string] ]:
identifier[barcode1] = identifier[find3radbcode] ( identifier[cutters] = identifier[cutters] ,
identifier[longbar] = identifier[longbar] , identifier[read1] = identifier[read1] )
identifier[barcode2] = identifier[find3radbcode] ( identifier[cutters] = identifier[cutters] ,
identifier[longbar] =( identifier[longbar] [ literal[int] ], identifier[longbar] [ literal[int] ]), identifier[read1] = identifier[read2] )
identifier[barcode] = identifier[barcode1] + literal[string] + identifier[barcode2]
keyword[else] :
identifier[barcode] = identifier[getbarcode] ( identifier[cutters] , identifier[read1] , identifier[longbar] )
identifier[sname_match] = identifier[matchdict] . identifier[get] ( identifier[barcode] )
keyword[if] identifier[sname_match] :
identifier[dbars] [ identifier[sname_match] ]. identifier[add] ( identifier[barcode] )
identifier[filestat] [ literal[int] ]+= literal[int]
identifier[filestat] [ literal[int] ]+= literal[int]
identifier[samplehits] [ identifier[sname_match] ]+= literal[int]
identifier[barhits] [ identifier[barcode] ]+= literal[int]
keyword[if] identifier[barcode] keyword[in] identifier[barhits] :
identifier[barhits] [ identifier[barcode] ]+= literal[int]
keyword[else] :
identifier[barhits] [ identifier[barcode] ]= literal[int]
identifier[lenbar] = identifier[len] ( identifier[barcode] )
keyword[if] literal[string] keyword[in] identifier[data] . identifier[paramsdict] [ literal[string] ]:
identifier[lenbar] = identifier[len] ( identifier[barcode1] )
keyword[if] identifier[data] . identifier[paramsdict] [ literal[string] ]== literal[string] :
identifier[overlen] = identifier[len] ( identifier[cutters] [ literal[int] ][ literal[int] ])+ identifier[lenbar] + literal[int]
identifier[read1] [ literal[int] ]= identifier[read1] [ literal[int] ][:- identifier[overlen] ]+ literal[string]
identifier[read1] [ literal[int] ]= identifier[read1] [ literal[int] ][:- identifier[overlen] ]+ literal[string]
keyword[else] :
identifier[read1] [ literal[int] ]= identifier[read1] [ literal[int] ][ identifier[lenbar] :]
identifier[read1] [ literal[int] ]= identifier[read1] [ literal[int] ][ identifier[lenbar] :]
keyword[if] literal[string] keyword[in] identifier[data] . identifier[paramsdict] [ literal[string] ]:
identifier[read2] = identifier[list] ( identifier[read2] )
identifier[read2] [ literal[int] ]= identifier[read2] [ literal[int] ][ identifier[len] ( identifier[barcode2] ):]
identifier[read2] [ literal[int] ]= identifier[read2] [ literal[int] ][ identifier[len] ( identifier[barcode2] ):]
identifier[dsort1] [ identifier[sname_match] ]. identifier[append] ( literal[string] . identifier[join] ( identifier[read1] ))
keyword[if] literal[string] keyword[in] identifier[data] . identifier[paramsdict] [ literal[string] ]:
identifier[dsort2] [ identifier[sname_match] ]. identifier[append] ( literal[string] . identifier[join] ( identifier[read2] ))
keyword[else] :
identifier[misses] [ literal[string] ]+= literal[int]
keyword[if] identifier[barcode] :
identifier[filestat] [ literal[int] ]+= literal[int]
keyword[if] keyword[not] identifier[filestat] [ literal[int] ]% identifier[waitchunk] :
identifier[writetofile] ( identifier[data] , identifier[dsort1] , literal[int] , identifier[epid] )
keyword[if] literal[string] keyword[in] identifier[data] . identifier[paramsdict] [ literal[string] ]:
identifier[writetofile] ( identifier[data] , identifier[dsort2] , literal[int] , identifier[epid] )
keyword[for] identifier[sample] keyword[in] identifier[data] . identifier[barcodes] :
keyword[if] literal[string] keyword[in] identifier[sname] :
identifier[sname] = identifier[sname] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[dsort1] [ identifier[sname] ]=[]
identifier[dsort2] [ identifier[sname] ]=[]
identifier[ofile1] . identifier[close] ()
keyword[if] identifier[tups] [ literal[int] ]:
identifier[ofile2] . identifier[close] ()
identifier[writetofile] ( identifier[data] , identifier[dsort1] , literal[int] , identifier[epid] )
keyword[if] literal[string] keyword[in] identifier[data] . identifier[paramsdict] [ literal[string] ]:
identifier[writetofile] ( identifier[data] , identifier[dsort2] , literal[int] , identifier[epid] )
identifier[samplestats] =[ identifier[samplehits] , identifier[barhits] , identifier[misses] , identifier[dbars] ]
identifier[outname] = identifier[os] . identifier[path] . identifier[join] ( identifier[data] . identifier[dirs] . identifier[fastqs] , literal[string] . identifier[format] ( identifier[epid] , identifier[fnum] ))
keyword[with] identifier[open] ( identifier[outname] , literal[string] ) keyword[as] identifier[wout] :
identifier[pickle] . identifier[dump] ([ identifier[filestat] , identifier[samplestats] ], identifier[wout] )
keyword[return] identifier[outname] | def barmatch2(data, tups, cutters, longbar, matchdict, fnum):
"""
cleaner barmatch func...
"""
## how many reads to store before writing to disk
waitchunk = int(1000000.0)
## pid name for this engine
epid = os.getpid()
## counters for total reads, those with cutsite, and those that matched
filestat = np.zeros(3, dtype=np.int)
## store observed sample matches
samplehits = {}
## dictionaries to store first and second reads until writing to file
dsort1 = {}
dsort2 = {}
## dictionary for all bars matched in sample
dbars = {}
## fill for sample names
for sname in data.barcodes:
if '-technical-replicate-' in sname:
sname = sname.rsplit('-technical-replicate', 1)[0] # depends on [control=['if'], data=['sname']]
samplehits[sname] = 0
dsort1[sname] = []
dsort2[sname] = []
dbars[sname] = set() # depends on [control=['for'], data=['sname']]
## store observed bars
barhits = {}
for barc in matchdict:
barhits[barc] = 0 # depends on [control=['for'], data=['barc']]
## store others
misses = {}
misses['_'] = 0
## build func for finding barcode
getbarcode = get_barcode_func(data, longbar)
## get quart iterator of reads
if tups[0].endswith('.gz'):
ofunc = gzip.open # depends on [control=['if'], data=[]]
else:
ofunc = open ## create iterators
ofile1 = ofunc(tups[0], 'r')
fr1 = iter(ofile1)
quart1 = itertools.izip(fr1, fr1, fr1, fr1)
if tups[1]:
ofile2 = ofunc(tups[1], 'r')
fr2 = iter(ofile2)
quart2 = itertools.izip(fr2, fr2, fr2, fr2)
quarts = itertools.izip(quart1, quart2) # depends on [control=['if'], data=[]]
else:
quarts = itertools.izip(quart1, iter(int, 1))
## go until end of the file
while 1:
try:
(read1, read2) = quarts.next()
read1 = list(read1)
filestat[0] += 1 # depends on [control=['try'], data=[]]
except StopIteration:
break # depends on [control=['except'], data=[]]
barcode = ''
## Get barcode_R2 and check for matching sample name
if '3rad' in data.paramsdict['datatype']:
## Here we're just reusing the findbcode function
## for R2, and reconfiguring the longbar tuple to have the
## maxlen for the R2 barcode
## Parse barcode. Use the parsing function selected above.
barcode1 = find3radbcode(cutters=cutters, longbar=longbar, read1=read1)
barcode2 = find3radbcode(cutters=cutters, longbar=(longbar[2], longbar[1]), read1=read2)
barcode = barcode1 + '+' + barcode2 # depends on [control=['if'], data=[]]
else:
## Parse barcode. Uses the parsing function selected above.
barcode = getbarcode(cutters, read1, longbar) ## find if it matches
sname_match = matchdict.get(barcode)
if sname_match:
#sample_index[filestat[0]-1] = snames.index(sname_match) + 1
## record who matched
dbars[sname_match].add(barcode)
filestat[1] += 1
filestat[2] += 1
samplehits[sname_match] += 1
barhits[barcode] += 1
if barcode in barhits:
barhits[barcode] += 1 # depends on [control=['if'], data=['barcode', 'barhits']]
else:
barhits[barcode] = 1
## trim off barcode
lenbar = len(barcode)
if '3rad' in data.paramsdict['datatype']:
## Iff 3rad trim the len of the first barcode
lenbar = len(barcode1) # depends on [control=['if'], data=[]]
if data.paramsdict['datatype'] == '2brad':
overlen = len(cutters[0][0]) + lenbar + 1
read1[1] = read1[1][:-overlen] + '\n'
read1[3] = read1[3][:-overlen] + '\n' # depends on [control=['if'], data=[]]
else:
read1[1] = read1[1][lenbar:]
read1[3] = read1[3][lenbar:]
## Trim barcode off R2 and append. Only 3rad datatype
## pays the cpu cost of splitting R2
if '3rad' in data.paramsdict['datatype']:
read2 = list(read2)
read2[1] = read2[1][len(barcode2):]
read2[3] = read2[3][len(barcode2):] # depends on [control=['if'], data=[]]
## append to dsort
dsort1[sname_match].append(''.join(read1))
if 'pair' in data.paramsdict['datatype']:
dsort2[sname_match].append(''.join(read2)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
misses['_'] += 1
if barcode:
filestat[1] += 1 # depends on [control=['if'], data=[]]
## how can we make it so all of the engines aren't trying to write to
## ~100-200 files all at the same time? This is the I/O limit we hit..
## write out at 100K to keep memory low. It is fine on HPC which can
## write parallel, but regular systems might crash
if not filestat[0] % waitchunk:
## write the remaining reads to file"
writetofile(data, dsort1, 1, epid)
if 'pair' in data.paramsdict['datatype']:
writetofile(data, dsort2, 2, epid) # depends on [control=['if'], data=[]]
## clear out dsorts
for sample in data.barcodes:
if '-technical-replicate-' in sname:
sname = sname.rsplit('-technical-replicate', 1)[0] # depends on [control=['if'], data=['sname']]
dsort1[sname] = []
dsort2[sname] = [] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
## reset longlist
#longlist = np.zeros(waitchunk, dtype=np.uint32)
## close open files
ofile1.close()
if tups[1]:
ofile2.close() # depends on [control=['if'], data=[]]
## write the remaining reads to file
writetofile(data, dsort1, 1, epid)
if 'pair' in data.paramsdict['datatype']:
writetofile(data, dsort2, 2, epid) # depends on [control=['if'], data=[]]
## return stats in saved pickle b/c return_queue is too small
## and the size of the match dictionary can become quite large
samplestats = [samplehits, barhits, misses, dbars]
outname = os.path.join(data.dirs.fastqs, 'tmp_{}_{}.p'.format(epid, fnum))
with open(outname, 'w') as wout:
pickle.dump([filestat, samplestats], wout) # depends on [control=['with'], data=['wout']]
return outname |
def describe(value):
    """Describe any value as a descriptor.

    Helper function for describing any object with an appropriate descriptor
    object.

    Args:
        value: Value to describe as a descriptor.

    Returns:
        Descriptor message class if object is describable as a descriptor,
        else None.
    """
    # Check the most specific runtime kinds first; fall through to None
    # when nothing matches.
    if isinstance(value, types.ModuleType):
        return describe_file(value)
    if isinstance(value, messages.Field):
        return describe_field(value)
    if isinstance(value, messages.Enum):
        return describe_enum_value(value)
    if isinstance(value, type):
        # Classes: distinguish message definitions from enum definitions.
        if issubclass(value, messages.Message):
            return describe_message(value)
        if issubclass(value, messages.Enum):
            return describe_enum(value)
    return None
constant[Describe any value as a descriptor.
Helper function for describing any object with an appropriate descriptor
object.
Args:
value: Value to describe as a descriptor.
Returns:
Descriptor message class if object is describable as a descriptor, else
None.
]
if call[name[isinstance], parameter[name[value], name[types].ModuleType]] begin[:]
return[call[name[describe_file], parameter[name[value]]]]
return[constant[None]] | keyword[def] identifier[describe] ( identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[types] . identifier[ModuleType] ):
keyword[return] identifier[describe_file] ( identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[messages] . identifier[Field] ):
keyword[return] identifier[describe_field] ( identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[messages] . identifier[Enum] ):
keyword[return] identifier[describe_enum_value] ( identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[type] ):
keyword[if] identifier[issubclass] ( identifier[value] , identifier[messages] . identifier[Message] ):
keyword[return] identifier[describe_message] ( identifier[value] )
keyword[elif] identifier[issubclass] ( identifier[value] , identifier[messages] . identifier[Enum] ):
keyword[return] identifier[describe_enum] ( identifier[value] )
keyword[return] keyword[None] | def describe(value):
"""Describe any value as a descriptor.
Helper function for describing any object with an appropriate descriptor
object.
Args:
value: Value to describe as a descriptor.
Returns:
Descriptor message class if object is describable as a descriptor, else
None.
"""
if isinstance(value, types.ModuleType):
return describe_file(value) # depends on [control=['if'], data=[]]
elif isinstance(value, messages.Field):
return describe_field(value) # depends on [control=['if'], data=[]]
elif isinstance(value, messages.Enum):
return describe_enum_value(value) # depends on [control=['if'], data=[]]
elif isinstance(value, type):
if issubclass(value, messages.Message):
return describe_message(value) # depends on [control=['if'], data=[]]
elif issubclass(value, messages.Enum):
return describe_enum(value) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return None |
def get_parent(self):
    """Get the parent ``Expression`` for this object.

    Returns:
        Expression: The ``Expression`` which contains this object.

    Raises:
        FiqlObjectException: The parent is not an ``Expression`` (for
            example, it is still ``None``).
    """
    parent = self.parent
    if isinstance(parent, Expression):
        return parent
    raise FiqlObjectException("Parent must be of %s not %s" % (
        Expression, type(parent)))
constant[Get the parent ``Expression`` for this object.
Returns:
Expression: The ``Expression`` which contains this object.
Raises:
FiqlObjectException: Parent is ``None``.
]
if <ast.UnaryOp object at 0x7da1b03502e0> begin[:]
<ast.Raise object at 0x7da1b0350070>
return[name[self].parent] | keyword[def] identifier[get_parent] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[parent] , identifier[Expression] ):
keyword[raise] identifier[FiqlObjectException] ( literal[string] %(
identifier[Expression] , identifier[type] ( identifier[self] . identifier[parent] )))
keyword[return] identifier[self] . identifier[parent] | def get_parent(self):
"""Get the parent ``Expression`` for this object.
Returns:
Expression: The ``Expression`` which contains this object.
Raises:
FiqlObjectException: Parent is ``None``.
"""
if not isinstance(self.parent, Expression):
raise FiqlObjectException('Parent must be of %s not %s' % (Expression, type(self.parent))) # depends on [control=['if'], data=[]]
return self.parent |
def Y_ampl(self, new_y_scale):
    """Rescale the Y axis using one of the predefined amplitude values."""
    parent = self.parent
    # Store the new scale on the parent, then redraw the traces with it.
    parent.value('y_scale', new_y_scale)
    parent.traces.display()
constant[Make scaling on Y axis using predefined values]
call[name[self].parent.value, parameter[constant[y_scale], name[new_y_scale]]]
call[name[self].parent.traces.display, parameter[]] | keyword[def] identifier[Y_ampl] ( identifier[self] , identifier[new_y_scale] ):
literal[string]
identifier[self] . identifier[parent] . identifier[value] ( literal[string] , identifier[new_y_scale] )
identifier[self] . identifier[parent] . identifier[traces] . identifier[display] () | def Y_ampl(self, new_y_scale):
"""Make scaling on Y axis using predefined values"""
self.parent.value('y_scale', new_y_scale)
self.parent.traces.display() |
def free(self):
    """Return the amount of free space on the remote WebDAV server, in bytes.

    More information you can find by link
    http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
    """
    response = self.execute_request(
        action='free',
        path='',
        data=WebDavXmlUtils.create_free_space_request_content(),
    )
    return WebDavXmlUtils.parse_free_space_response(
        response.content, self.webdav.hostname)
constant[Returns an amount of free space on remote WebDAV server.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:return: an amount of free space in bytes.
]
variable[data] assign[=] call[name[WebDavXmlUtils].create_free_space_request_content, parameter[]]
variable[response] assign[=] call[name[self].execute_request, parameter[]]
return[call[name[WebDavXmlUtils].parse_free_space_response, parameter[name[response].content, name[self].webdav.hostname]]] | keyword[def] identifier[free] ( identifier[self] ):
literal[string]
identifier[data] = identifier[WebDavXmlUtils] . identifier[create_free_space_request_content] ()
identifier[response] = identifier[self] . identifier[execute_request] ( identifier[action] = literal[string] , identifier[path] = literal[string] , identifier[data] = identifier[data] )
keyword[return] identifier[WebDavXmlUtils] . identifier[parse_free_space_response] ( identifier[response] . identifier[content] , identifier[self] . identifier[webdav] . identifier[hostname] ) | def free(self):
"""Returns an amount of free space on remote WebDAV server.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:return: an amount of free space in bytes.
"""
data = WebDavXmlUtils.create_free_space_request_content()
response = self.execute_request(action='free', path='', data=data)
return WebDavXmlUtils.parse_free_space_response(response.content, self.webdav.hostname) |
def as_block_string(txt):
    """Return a string formatted as a python block comment string, like the one
    you're currently reading. Special characters are escaped if necessary.
    """
    import json
    # json.dumps escapes quotes/backslashes/control characters for us; the
    # surrounding double quotes it adds are sliced off and trailing
    # whitespace is dropped per line.
    escaped = [json.dumps(raw)[1:-1].rstrip() for raw in txt.split('\n')]
    return '"""\n%s\n"""' % '\n'.join(escaped)
constant[Return a string formatted as a python block comment string, like the one
you're currently reading. Special characters are escaped if necessary.
]
import module[json]
variable[lines] assign[=] list[[]]
for taget[name[line]] in starred[call[name[txt].split, parameter[constant[
]]]] begin[:]
variable[line_] assign[=] call[name[json].dumps, parameter[name[line]]]
variable[line_] assign[=] call[call[name[line_]][<ast.Slice object at 0x7da1b17ed1b0>].rstrip, parameter[]]
call[name[lines].append, parameter[name[line_]]]
return[binary_operation[constant["""
%s
"""] <ast.Mod object at 0x7da2590d6920> call[constant[
].join, parameter[name[lines]]]]] | keyword[def] identifier[as_block_string] ( identifier[txt] ):
literal[string]
keyword[import] identifier[json]
identifier[lines] =[]
keyword[for] identifier[line] keyword[in] identifier[txt] . identifier[split] ( literal[string] ):
identifier[line_] = identifier[json] . identifier[dumps] ( identifier[line] )
identifier[line_] = identifier[line_] [ literal[int] :- literal[int] ]. identifier[rstrip] ()
identifier[lines] . identifier[append] ( identifier[line_] )
keyword[return] literal[string] % literal[string] . identifier[join] ( identifier[lines] ) | def as_block_string(txt):
"""Return a string formatted as a python block comment string, like the one
you're currently reading. Special characters are escaped if necessary.
"""
import json
lines = []
for line in txt.split('\n'):
line_ = json.dumps(line)
line_ = line_[1:-1].rstrip() # drop double quotes
lines.append(line_) # depends on [control=['for'], data=['line']]
return '"""\n%s\n"""' % '\n'.join(lines) |
def _breadcrumbs(path):
    """Return a list of (cumulative_path, part) breadcrumb tuples for *path*."""
    crumbs = []
    accumulated = None
    for segment in path_yield(path):
        # First (or any falsy) accumulator restarts the chain at the bare
        # segment, matching path_join semantics for an empty prefix.
        if accumulated:
            accumulated = path_join(accumulated, segment)
        else:
            accumulated = segment
        crumbs.append((accumulated, segment))
    return crumbs
constant[Return breadcrumb dict from path.]
variable[full] assign[=] constant[None]
variable[crumbs] assign[=] list[[]]
for taget[name[part]] in starred[call[name[path_yield], parameter[name[path]]]] begin[:]
variable[full] assign[=] <ast.IfExp object at 0x7da18f00cca0>
call[name[crumbs].append, parameter[tuple[[<ast.Name object at 0x7da18f00c100>, <ast.Name object at 0x7da18f00d420>]]]]
return[name[crumbs]] | keyword[def] identifier[_breadcrumbs] ( identifier[path] ):
literal[string]
identifier[full] = keyword[None]
identifier[crumbs] =[]
keyword[for] identifier[part] keyword[in] identifier[path_yield] ( identifier[path] ):
identifier[full] = identifier[path_join] ( identifier[full] , identifier[part] ) keyword[if] identifier[full] keyword[else] identifier[part]
identifier[crumbs] . identifier[append] (( identifier[full] , identifier[part] ))
keyword[return] identifier[crumbs] | def _breadcrumbs(path):
"""Return breadcrumb dict from path."""
full = None
crumbs = []
for part in path_yield(path):
full = path_join(full, part) if full else part
crumbs.append((full, part)) # depends on [control=['for'], data=['part']]
return crumbs |
def set_representative_sequence(self, force_rerun=False):
    """Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative sequence.

    Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
    except when KEGG mappings have PDBs associated with them and UniProt doesn't.

    Args:
        force_rerun (bool): Set to True to recheck stored sequences

    """
    # TODO: rethink use of multiple database sources - may lead to inconsistency with genome sources
    mapped_count = 0
    for gene in tqdm(self.genes):
        representative = gene.protein.set_representative_sequence(force_rerun=force_rerun)
        # Only count genes whose representative sequence is backed by a sequence file.
        if representative and representative.sequence_file:
            mapped_count += 1
    log.info('{}/{}: number of genes with a representative sequence'.format(len(self.genes_with_a_representative_sequence),
                                                                            len(self.genes)))
    log.info('See the "df_representative_sequences" attribute for a summary dataframe.')
constant[Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative sequence.
Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
except when KEGG mappings have PDBs associated with them and UniProt doesn't.
Args:
force_rerun (bool): Set to True to recheck stored sequences
]
variable[successfully_mapped_counter] assign[=] constant[0]
for taget[name[g]] in starred[call[name[tqdm], parameter[name[self].genes]]] begin[:]
variable[repseq] assign[=] call[name[g].protein.set_representative_sequence, parameter[]]
if name[repseq] begin[:]
if name[repseq].sequence_file begin[:]
<ast.AugAssign object at 0x7da204622b90>
call[name[log].info, parameter[call[constant[{}/{}: number of genes with a representative sequence].format, parameter[call[name[len], parameter[name[self].genes_with_a_representative_sequence]], call[name[len], parameter[name[self].genes]]]]]]
call[name[log].info, parameter[constant[See the "df_representative_sequences" attribute for a summary dataframe.]]] | keyword[def] identifier[set_representative_sequence] ( identifier[self] , identifier[force_rerun] = keyword[False] ):
literal[string]
identifier[successfully_mapped_counter] = literal[int]
keyword[for] identifier[g] keyword[in] identifier[tqdm] ( identifier[self] . identifier[genes] ):
identifier[repseq] = identifier[g] . identifier[protein] . identifier[set_representative_sequence] ( identifier[force_rerun] = identifier[force_rerun] )
keyword[if] identifier[repseq] :
keyword[if] identifier[repseq] . identifier[sequence_file] :
identifier[successfully_mapped_counter] += literal[int]
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[self] . identifier[genes_with_a_representative_sequence] ),
identifier[len] ( identifier[self] . identifier[genes] )))
identifier[log] . identifier[info] ( literal[string] ) | def set_representative_sequence(self, force_rerun=False):
"""Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative sequence.
Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
except when KEGG mappings have PDBs associated with them and UniProt doesn't.
Args:
force_rerun (bool): Set to True to recheck stored sequences
"""
# TODO: rethink use of multiple database sources - may lead to inconsistency with genome sources
successfully_mapped_counter = 0
for g in tqdm(self.genes):
repseq = g.protein.set_representative_sequence(force_rerun=force_rerun)
if repseq:
if repseq.sequence_file:
successfully_mapped_counter += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['g']]
log.info('{}/{}: number of genes with a representative sequence'.format(len(self.genes_with_a_representative_sequence), len(self.genes)))
log.info('See the "df_representative_sequences" attribute for a summary dataframe.') |
def get_filter_kwargs(self):
    """
    Translates the cleaned data into a dictionary
    that can used to generate the filter removing
    blank values.

    Returns:
        dict: mapping of field name (or ``"<field>__icontains"`` for
        search fields) to its cleaned value; empty when the form is
        invalid or every value is blank.
    """
    # Guard clause: an invalid form contributes no filter kwargs.
    if not self.is_valid():
        return {}
    filter_kwargs = {}
    for field in self.get_filter_fields():
        # Fields may declare their own notion of "empty"; fall back to
        # the module-level EMPTY_VALUES otherwise.
        empty_values = getattr(self.fields[field], 'empty_values', EMPTY_VALUES)
        value = self.cleaned_data.get(field)
        if value not in empty_values:
            if self.search_fields and field in self.search_fields:
                # Search fields use a case-insensitive containment lookup.
                filter_kwargs["%s__icontains" % field] = value
            else:
                filter_kwargs[field] = value
    return filter_kwargs
constant[
Translates the cleaned data into a dictionary
that can used to generate the filter removing
blank values.
]
if call[name[self].is_valid, parameter[]] begin[:]
variable[filter_kwargs] assign[=] dictionary[[], []]
for taget[name[field]] in starred[call[name[self].get_filter_fields, parameter[]]] begin[:]
variable[empty_values] assign[=] name[EMPTY_VALUES]
if call[name[hasattr], parameter[call[name[self].fields][name[field]], constant[empty_values]]] begin[:]
variable[empty_values] assign[=] call[name[self].fields][name[field]].empty_values
variable[value] assign[=] call[name[self].cleaned_data.get, parameter[name[field]]]
if <ast.UnaryOp object at 0x7da1b0aee050> begin[:]
if <ast.BoolOp object at 0x7da1b0aefa30> begin[:]
call[name[filter_kwargs]][binary_operation[constant[%s__icontains] <ast.Mod object at 0x7da2590d6920> name[field]]] assign[=] name[value]
return[name[filter_kwargs]] | keyword[def] identifier[get_filter_kwargs] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_valid] ():
identifier[filter_kwargs] ={}
keyword[for] identifier[field] keyword[in] identifier[self] . identifier[get_filter_fields] ():
identifier[empty_values] = identifier[EMPTY_VALUES]
keyword[if] identifier[hasattr] ( identifier[self] . identifier[fields] [ identifier[field] ], literal[string] ):
identifier[empty_values] = identifier[self] . identifier[fields] [ identifier[field] ]. identifier[empty_values]
identifier[value] = identifier[self] . identifier[cleaned_data] . identifier[get] ( identifier[field] )
keyword[if] keyword[not] identifier[value] keyword[in] identifier[empty_values] :
keyword[if] identifier[self] . identifier[search_fields] keyword[and] identifier[field] keyword[in] identifier[self] . identifier[search_fields] :
identifier[filter_kwargs] [ literal[string] % identifier[field] ]= identifier[value]
keyword[else] :
identifier[filter_kwargs] [ identifier[field] ]= identifier[value]
keyword[return] identifier[filter_kwargs]
keyword[else] :
keyword[return] {} | def get_filter_kwargs(self):
"""
Translates the cleaned data into a dictionary
that can used to generate the filter removing
blank values.
"""
if self.is_valid():
filter_kwargs = {}
for field in self.get_filter_fields():
empty_values = EMPTY_VALUES
if hasattr(self.fields[field], 'empty_values'):
empty_values = self.fields[field].empty_values # depends on [control=['if'], data=[]]
value = self.cleaned_data.get(field)
if not value in empty_values:
if self.search_fields and field in self.search_fields:
filter_kwargs['%s__icontains' % field] = value # depends on [control=['if'], data=[]]
else:
filter_kwargs[field] = value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
return filter_kwargs # depends on [control=['if'], data=[]]
else:
return {} |
def _getColumnNeighborhood(self, centerColumn):
    """
    Gets a neighborhood of columns.

    Simply calls topology.neighborhood or topology.wrappingNeighborhood

    A subclass can insert different topology behavior by overriding this method.

    :param centerColumn (int)
    The center of the neighborhood.

    @returns (1D numpy array of integers)
    The columns in the neighborhood.
    """
    # Select the topology function once, then make a single call.
    if self._wrapAround:
        neighborhoodFn = topology.wrappingNeighborhood
    else:
        neighborhoodFn = topology.neighborhood
    return neighborhoodFn(centerColumn,
                          self._inhibitionRadius,
                          self._columnDimensions)
constant[
Gets a neighborhood of columns.
Simply calls topology.neighborhood or topology.wrappingNeighborhood
A subclass can insert different topology behavior by overriding this method.
:param centerColumn (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The columns in the neighborhood.
]
if name[self]._wrapAround begin[:]
return[call[name[topology].wrappingNeighborhood, parameter[name[centerColumn], name[self]._inhibitionRadius, name[self]._columnDimensions]]] | keyword[def] identifier[_getColumnNeighborhood] ( identifier[self] , identifier[centerColumn] ):
literal[string]
keyword[if] identifier[self] . identifier[_wrapAround] :
keyword[return] identifier[topology] . identifier[wrappingNeighborhood] ( identifier[centerColumn] ,
identifier[self] . identifier[_inhibitionRadius] ,
identifier[self] . identifier[_columnDimensions] )
keyword[else] :
keyword[return] identifier[topology] . identifier[neighborhood] ( identifier[centerColumn] ,
identifier[self] . identifier[_inhibitionRadius] ,
identifier[self] . identifier[_columnDimensions] ) | def _getColumnNeighborhood(self, centerColumn):
"""
Gets a neighborhood of columns.
Simply calls topology.neighborhood or topology.wrappingNeighborhood
A subclass can insert different topology behavior by overriding this method.
:param centerColumn (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The columns in the neighborhood.
"""
if self._wrapAround:
return topology.wrappingNeighborhood(centerColumn, self._inhibitionRadius, self._columnDimensions) # depends on [control=['if'], data=[]]
else:
return topology.neighborhood(centerColumn, self._inhibitionRadius, self._columnDimensions) |
def recognize_using_websocket(self,
audio,
content_type,
recognize_callback,
model=None,
language_customization_id=None,
acoustic_customization_id=None,
customization_weight=None,
base_model_version=None,
inactivity_timeout=None,
interim_results=None,
keywords=None,
keywords_threshold=None,
max_alternatives=None,
word_alternatives_threshold=None,
word_confidence=None,
timestamps=None,
profanity_filter=None,
smart_formatting=None,
speaker_labels=None,
http_proxy_host=None,
http_proxy_port=None,
customization_id=None,
grammar_name=None,
redaction=None,
**kwargs):
"""
Sends audio for speech recognition using web sockets.
:param AudioSource audio: The audio to transcribe in the format specified by the
`Content-Type` header.
:param str content_type: The type of the input: audio/basic, audio/flac,
audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus,
audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or
audio/webm;codecs=vorbis.
:param RecognizeCallback recognize_callback: The callback method for the websocket.
:param str model: The identifier of the model that is to be used for the
recognition request or, for the **Create a session** method, with the new session.
:param str language_customization_id: The customization ID (GUID) of a custom
language model that is to be used with the recognition request. The base model of
the specified custom language model must match the model specified with the
`model` parameter. You must make the request with service credentials created for
the instance of the service that owns the custom model. By default, no custom
language model is used. See [Custom
models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom).
**Note:** Use this parameter instead of the deprecated `customization_id`
parameter.
:param str acoustic_customization_id: The customization ID (GUID) of a custom
acoustic model that is to be used with the recognition request or, for the
**Create a session** method, with the new session. The base model of the specified
custom acoustic model must match the model specified with the `model` parameter.
You must make the request with service credentials created for the instance of the
service that owns the custom model. By default, no custom acoustic model is used.
:param float customization_weight: If you specify the customization ID (GUID) of a
custom language model with the recognition request or, for sessions, with the
**Create a session** method, the customization weight tells the service how much
weight to give to words from the custom language model compared to those from the
base model for the current request.
Specify a value between 0.0 and 1.0. Unless a different customization weight was
specified for the custom model when it was trained, the default value is 0.3. A
customization weight that you specify overrides a weight that was specified when
the custom model was trained.
The default value yields the best performance in general. Assign a higher value if
your audio makes frequent use of OOV words from the custom model. Use caution when
setting the weight: a higher value can improve the accuracy of phrases from the
custom model's domain, but it can negatively affect performance on non-domain
phrases.
:param str base_model_version: The version of the specified base model that is to
be used with recognition request or, for the **Create a session** method, with the
new session. Multiple versions of a base model can exist when a model is updated
for internal improvements. The parameter is intended primarily for use with custom
models that have been upgraded for a new base model. The default value depends on
whether the parameter is used with or without a custom model. For more
information, see [Base model
version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version).
:param int inactivity_timeout: The time in seconds after which, if only silence
(no speech) is detected in submitted audio, the connection is closed with a 400
error. Useful for stopping audio submission from a live microphone when a user
simply walks away. Use `-1` for infinity.
:param list[str] keywords: An array of keyword strings to spot in the audio. Each
keyword string can include one or more tokens. Keywords are spotted only in the
final hypothesis, not in interim results. If you specify any keywords, you must
also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit
the parameter or specify an empty array if you do not need to spot keywords.
:param float keywords_threshold: A confidence value that is the lower bound for
spotting a keyword. A word is considered to match a keyword if its confidence is
greater than or equal to the threshold. Specify a probability between 0 and 1
inclusive. No keyword spotting is performed if you omit the parameter. If you
specify a threshold, you must also specify one or more keywords.
:param int max_alternatives: The maximum number of alternative transcripts to be
returned. By default, a single transcription is returned.
:param float word_alternatives_threshold: A confidence value that is the lower
bound for identifying a hypothesis as a possible word alternative (also known as
\"Confusion Networks\"). An alternative word is considered if its confidence is
greater than or equal to the threshold. Specify a probability between 0 and 1
inclusive. No alternative words are computed if you omit the parameter.
:param bool word_confidence: If `true`, a confidence measure in the range of 0 to
1 is returned for each word. By default, no word confidence measures are returned.
:param bool timestamps: If `true`, time alignment is returned for each word. By
default, no timestamps are returned.
:param bool profanity_filter: If `true` (the default), filters profanity from all
output except for keyword results by replacing inappropriate words with a series
of asterisks. Set the parameter to `false` to return results with no censoring.
Applies to US English transcription only.
:param bool smart_formatting: If `true`, converts dates, times, series of digits
and numbers, phone numbers, currency values, and internet addresses into more
readable, conventional representations in the final transcript of a recognition
request. For US English, also converts certain keyword strings to punctuation
symbols. By default, no smart formatting is performed. Applies to US English and
Spanish transcription only.
:param bool speaker_labels: If `true`, the response includes labels that identify
which words were spoken by which participants in a multi-person exchange. By
default, no speaker labels are returned. Setting `speaker_labels` to `true` forces
the `timestamps` parameter to be `true`, regardless of whether you specify `false`
for the parameter.
To determine whether a language model supports speaker labels, use the **Get
models** method and check that the attribute `speaker_labels` is set to `true`.
You can also refer to [Speaker
labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels).
:param str http_proxy_host: http proxy host name.
:param str http_proxy_port: http proxy port. If not set, set to 80.
:param str customization_id: **Deprecated.** Use the `language_customization_id`
parameter to specify the customization ID (GUID) of a custom language model that
is to be used with the recognition request. Do not specify both parameters with a
request.
:param str grammar_name: The name of a grammar that is to be used with the
recognition request. If you specify a grammar, you must also use the
`language_customization_id` parameter to specify the name of the custom language
model for which the grammar is defined. The service recognizes only strings that
are recognized by the specified grammar; it does not recognize other custom words
from the model's words resource. See
[Grammars](https://cloud.ibm.com/docs/services/speech-to-text/output.html).
:param bool redaction: If `true`, the service redacts, or masks, numeric data from
final transcripts. The feature redacts any number that has three or more
consecutive digits by replacing each digit with an `X` character. It is intended
to redact sensitive numeric data, such as credit card numbers. By default, the
service performs no redaction.
When you enable redaction, the service automatically enables smart formatting,
regardless of whether you explicitly disable that feature. To ensure maximum
security, the service also disables keyword spotting (ignores the `keywords` and
`keywords_threshold` parameters) and returns only a single final transcript
(forces the `max_alternatives` parameter to be `1`).
**Note:** Applies to US English, Japanese, and Korean transcription only.
See [Numeric
redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction).
:param dict headers: A `dict` containing the request headers
:return: A `dict` containing the `SpeechRecognitionResults` response.
:rtype: dict
"""
if audio is None:
raise ValueError('audio must be provided')
if not isinstance(audio, AudioSource):
raise Exception(
'audio is not of type AudioSource. Import the class from ibm_watson.websocket')
if content_type is None:
raise ValueError('content_type must be provided')
if recognize_callback is None:
raise ValueError('recognize_callback must be provided')
if not isinstance(recognize_callback, RecognizeCallback):
raise Exception(
'Callback is not a derived class of RecognizeCallback')
headers = {}
if self.default_headers is not None:
headers = self.default_headers.copy()
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
if self.token_manager:
access_token = self.token_manager.get_token()
headers['Authorization'] = '{0} {1}'.format(BEARER, access_token)
else:
authstring = "{0}:{1}".format(self.username, self.password)
base64_authorization = base64.b64encode(authstring.encode('utf-8')).decode('utf-8')
headers['Authorization'] = 'Basic {0}'.format(base64_authorization)
url = self.url.replace('https:', 'wss:')
params = {
'model': model,
'customization_id': customization_id,
'acoustic_customization_id': acoustic_customization_id,
'customization_weight': customization_weight,
'base_model_version': base_model_version,
'language_customization_id': language_customization_id
}
params = dict([(k, v) for k, v in params.items() if v is not None])
url += '/v1/recognize?{0}'.format(urlencode(params))
options = {
'content_type': content_type,
'inactivity_timeout': inactivity_timeout,
'interim_results': interim_results,
'keywords': keywords,
'keywords_threshold': keywords_threshold,
'max_alternatives': max_alternatives,
'word_alternatives_threshold': word_alternatives_threshold,
'word_confidence': word_confidence,
'timestamps': timestamps,
'profanity_filter': profanity_filter,
'smart_formatting': smart_formatting,
'speaker_labels': speaker_labels,
'grammar_name': grammar_name,
'redaction': redaction
}
options = dict([(k, v) for k, v in options.items() if v is not None])
RecognizeListener(audio,
options,
recognize_callback,
url,
headers,
http_proxy_host,
http_proxy_port,
self.verify) | def function[recognize_using_websocket, parameter[self, audio, content_type, recognize_callback, model, language_customization_id, acoustic_customization_id, customization_weight, base_model_version, inactivity_timeout, interim_results, keywords, keywords_threshold, max_alternatives, word_alternatives_threshold, word_confidence, timestamps, profanity_filter, smart_formatting, speaker_labels, http_proxy_host, http_proxy_port, customization_id, grammar_name, redaction]]:
constant[
Sends audio for speech recognition using web sockets.
:param AudioSource audio: The audio to transcribe in the format specified by the
`Content-Type` header.
:param str content_type: The type of the input: audio/basic, audio/flac,
audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus,
audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or
audio/webm;codecs=vorbis.
:param RecognizeCallback recognize_callback: The callback method for the websocket.
:param str model: The identifier of the model that is to be used for the
recognition request or, for the **Create a session** method, with the new session.
:param str language_customization_id: The customization ID (GUID) of a custom
language model that is to be used with the recognition request. The base model of
the specified custom language model must match the model specified with the
`model` parameter. You must make the request with service credentials created for
the instance of the service that owns the custom model. By default, no custom
language model is used. See [Custom
models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom).
**Note:** Use this parameter instead of the deprecated `customization_id`
parameter.
:param str acoustic_customization_id: The customization ID (GUID) of a custom
acoustic model that is to be used with the recognition request or, for the
**Create a session** method, with the new session. The base model of the specified
custom acoustic model must match the model specified with the `model` parameter.
You must make the request with service credentials created for the instance of the
service that owns the custom model. By default, no custom acoustic model is used.
:param float customization_weight: If you specify the customization ID (GUID) of a
custom language model with the recognition request or, for sessions, with the
**Create a session** method, the customization weight tells the service how much
weight to give to words from the custom language model compared to those from the
base model for the current request.
Specify a value between 0.0 and 1.0. Unless a different customization weight was
specified for the custom model when it was trained, the default value is 0.3. A
customization weight that you specify overrides a weight that was specified when
the custom model was trained.
The default value yields the best performance in general. Assign a higher value if
your audio makes frequent use of OOV words from the custom model. Use caution when
setting the weight: a higher value can improve the accuracy of phrases from the
custom model's domain, but it can negatively affect performance on non-domain
phrases.
:param str base_model_version: The version of the specified base model that is to
be used with recognition request or, for the **Create a session** method, with the
new session. Multiple versions of a base model can exist when a model is updated
for internal improvements. The parameter is intended primarily for use with custom
models that have been upgraded for a new base model. The default value depends on
whether the parameter is used with or without a custom model. For more
information, see [Base model
version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version).
:param int inactivity_timeout: The time in seconds after which, if only silence
(no speech) is detected in submitted audio, the connection is closed with a 400
error. Useful for stopping audio submission from a live microphone when a user
simply walks away. Use `-1` for infinity.
:param list[str] keywords: An array of keyword strings to spot in the audio. Each
keyword string can include one or more tokens. Keywords are spotted only in the
final hypothesis, not in interim results. If you specify any keywords, you must
also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit
the parameter or specify an empty array if you do not need to spot keywords.
:param float keywords_threshold: A confidence value that is the lower bound for
spotting a keyword. A word is considered to match a keyword if its confidence is
greater than or equal to the threshold. Specify a probability between 0 and 1
inclusive. No keyword spotting is performed if you omit the parameter. If you
specify a threshold, you must also specify one or more keywords.
:param int max_alternatives: The maximum number of alternative transcripts to be
returned. By default, a single transcription is returned.
:param float word_alternatives_threshold: A confidence value that is the lower
bound for identifying a hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its confidence is
greater than or equal to the threshold. Specify a probability between 0 and 1
inclusive. No alternative words are computed if you omit the parameter.
:param bool word_confidence: If `true`, a confidence measure in the range of 0 to
1 is returned for each word. By default, no word confidence measures are returned.
:param bool timestamps: If `true`, time alignment is returned for each word. By
default, no timestamps are returned.
:param bool profanity_filter: If `true` (the default), filters profanity from all
output except for keyword results by replacing inappropriate words with a series
of asterisks. Set the parameter to `false` to return results with no censoring.
Applies to US English transcription only.
:param bool smart_formatting: If `true`, converts dates, times, series of digits
and numbers, phone numbers, currency values, and internet addresses into more
readable, conventional representations in the final transcript of a recognition
request. For US English, also converts certain keyword strings to punctuation
symbols. By default, no smart formatting is performed. Applies to US English and
Spanish transcription only.
:param bool speaker_labels: If `true`, the response includes labels that identify
which words were spoken by which participants in a multi-person exchange. By
default, no speaker labels are returned. Setting `speaker_labels` to `true` forces
the `timestamps` parameter to be `true`, regardless of whether you specify `false`
for the parameter.
To determine whether a language model supports speaker labels, use the **Get
models** method and check that the attribute `speaker_labels` is set to `true`.
You can also refer to [Speaker
labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels).
:param str http_proxy_host: http proxy host name.
:param str http_proxy_port: http proxy port. If not set, set to 80.
:param str customization_id: **Deprecated.** Use the `language_customization_id`
parameter to specify the customization ID (GUID) of a custom language model that
is to be used with the recognition request. Do not specify both parameters with a
request.
:param str grammar_name: The name of a grammar that is to be used with the
recognition request. If you specify a grammar, you must also use the
`language_customization_id` parameter to specify the name of the custom language
model for which the grammar is defined. The service recognizes only strings that
are recognized by the specified grammar; it does not recognize other custom words
from the model's words resource. See
[Grammars](https://cloud.ibm.com/docs/services/speech-to-text/output.html).
:param bool redaction: If `true`, the service redacts, or masks, numeric data from
final transcripts. The feature redacts any number that has three or more
consecutive digits by replacing each digit with an `X` character. It is intended
to redact sensitive numeric data, such as credit card numbers. By default, the
service performs no redaction.
When you enable redaction, the service automatically enables smart formatting,
regardless of whether you explicitly disable that feature. To ensure maximum
security, the service also disables keyword spotting (ignores the `keywords` and
`keywords_threshold` parameters) and returns only a single final transcript
(forces the `max_alternatives` parameter to be `1`).
**Note:** Applies to US English, Japanese, and Korean transcription only.
See [Numeric
redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction).
:param dict headers: A `dict` containing the request headers
:return: A `dict` containing the `SpeechRecognitionResults` response.
:rtype: dict
]
if compare[name[audio] is constant[None]] begin[:]
<ast.Raise object at 0x7da18c4cc160>
if <ast.UnaryOp object at 0x7da18c4cffd0> begin[:]
<ast.Raise object at 0x7da18c4cf4f0>
if compare[name[content_type] is constant[None]] begin[:]
<ast.Raise object at 0x7da18c4ce6e0>
if compare[name[recognize_callback] is constant[None]] begin[:]
<ast.Raise object at 0x7da18c4cdf30>
if <ast.UnaryOp object at 0x7da18c4cefe0> begin[:]
<ast.Raise object at 0x7da18c4ce860>
variable[headers] assign[=] dictionary[[], []]
if compare[name[self].default_headers is_not constant[None]] begin[:]
variable[headers] assign[=] call[name[self].default_headers.copy, parameter[]]
if compare[constant[headers] in name[kwargs]] begin[:]
call[name[headers].update, parameter[call[name[kwargs].get, parameter[constant[headers]]]]]
if name[self].token_manager begin[:]
variable[access_token] assign[=] call[name[self].token_manager.get_token, parameter[]]
call[name[headers]][constant[Authorization]] assign[=] call[constant[{0} {1}].format, parameter[name[BEARER], name[access_token]]]
variable[url] assign[=] call[name[self].url.replace, parameter[constant[https:], constant[wss:]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a9c60>, <ast.Constant object at 0x7da20c6aa4a0>, <ast.Constant object at 0x7da20c6aa2c0>, <ast.Constant object at 0x7da20c6a9ab0>, <ast.Constant object at 0x7da20c6aa560>, <ast.Constant object at 0x7da20c6ab8b0>], [<ast.Name object at 0x7da20c6a9840>, <ast.Name object at 0x7da20c6a94e0>, <ast.Name object at 0x7da20c6a8ee0>, <ast.Name object at 0x7da20c6a8850>, <ast.Name object at 0x7da20c6a9e70>, <ast.Name object at 0x7da20c6a8d30>]]
variable[params] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da20c6a97b0>]]
<ast.AugAssign object at 0x7da20c6a8be0>
variable[options] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a9cc0>, <ast.Constant object at 0x7da20c6a8a30>, <ast.Constant object at 0x7da20c6a9570>, <ast.Constant object at 0x7da20c6a8dc0>, <ast.Constant object at 0x7da20c6a8700>, <ast.Constant object at 0x7da20c6a8c10>, <ast.Constant object at 0x7da20c6a8c40>, <ast.Constant object at 0x7da20c6a86a0>, <ast.Constant object at 0x7da20c6a9f00>, <ast.Constant object at 0x7da20c6a9ba0>, <ast.Constant object at 0x7da20c6a95a0>, <ast.Constant object at 0x7da20c6a9810>, <ast.Constant object at 0x7da20c6aa110>, <ast.Constant object at 0x7da20c6ab1c0>], [<ast.Name object at 0x7da20c6a8b20>, <ast.Name object at 0x7da20c6a9e10>, <ast.Name object at 0x7da20c6a9b40>, <ast.Name object at 0x7da20c6abdc0>, <ast.Name object at 0x7da20c6ab370>, <ast.Name object at 0x7da20c6aab60>, <ast.Name object at 0x7da20c6a8af0>, <ast.Name object at 0x7da20c6a87c0>, <ast.Name object at 0x7da20c6aac50>, <ast.Name object at 0x7da20c6ab5b0>, <ast.Name object at 0x7da20c6a9510>, <ast.Name object at 0x7da20c6aa710>, <ast.Name object at 0x7da20c6a85b0>, <ast.Name object at 0x7da20c6aa1d0>]]
variable[options] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da20c6aa1a0>]]
call[name[RecognizeListener], parameter[name[audio], name[options], name[recognize_callback], name[url], name[headers], name[http_proxy_host], name[http_proxy_port], name[self].verify]] | keyword[def] identifier[recognize_using_websocket] ( identifier[self] ,
identifier[audio] ,
identifier[content_type] ,
identifier[recognize_callback] ,
identifier[model] = keyword[None] ,
identifier[language_customization_id] = keyword[None] ,
identifier[acoustic_customization_id] = keyword[None] ,
identifier[customization_weight] = keyword[None] ,
identifier[base_model_version] = keyword[None] ,
identifier[inactivity_timeout] = keyword[None] ,
identifier[interim_results] = keyword[None] ,
identifier[keywords] = keyword[None] ,
identifier[keywords_threshold] = keyword[None] ,
identifier[max_alternatives] = keyword[None] ,
identifier[word_alternatives_threshold] = keyword[None] ,
identifier[word_confidence] = keyword[None] ,
identifier[timestamps] = keyword[None] ,
identifier[profanity_filter] = keyword[None] ,
identifier[smart_formatting] = keyword[None] ,
identifier[speaker_labels] = keyword[None] ,
identifier[http_proxy_host] = keyword[None] ,
identifier[http_proxy_port] = keyword[None] ,
identifier[customization_id] = keyword[None] ,
identifier[grammar_name] = keyword[None] ,
identifier[redaction] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[audio] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[audio] , identifier[AudioSource] ):
keyword[raise] identifier[Exception] (
literal[string] )
keyword[if] identifier[content_type] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[recognize_callback] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[recognize_callback] , identifier[RecognizeCallback] ):
keyword[raise] identifier[Exception] (
literal[string] )
identifier[headers] ={}
keyword[if] identifier[self] . identifier[default_headers] keyword[is] keyword[not] keyword[None] :
identifier[headers] = identifier[self] . identifier[default_headers] . identifier[copy] ()
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[headers] . identifier[update] ( identifier[kwargs] . identifier[get] ( literal[string] ))
keyword[if] identifier[self] . identifier[token_manager] :
identifier[access_token] = identifier[self] . identifier[token_manager] . identifier[get_token] ()
identifier[headers] [ literal[string] ]= literal[string] . identifier[format] ( identifier[BEARER] , identifier[access_token] )
keyword[else] :
identifier[authstring] = literal[string] . identifier[format] ( identifier[self] . identifier[username] , identifier[self] . identifier[password] )
identifier[base64_authorization] = identifier[base64] . identifier[b64encode] ( identifier[authstring] . identifier[encode] ( literal[string] )). identifier[decode] ( literal[string] )
identifier[headers] [ literal[string] ]= literal[string] . identifier[format] ( identifier[base64_authorization] )
identifier[url] = identifier[self] . identifier[url] . identifier[replace] ( literal[string] , literal[string] )
identifier[params] ={
literal[string] : identifier[model] ,
literal[string] : identifier[customization_id] ,
literal[string] : identifier[acoustic_customization_id] ,
literal[string] : identifier[customization_weight] ,
literal[string] : identifier[base_model_version] ,
literal[string] : identifier[language_customization_id]
}
identifier[params] = identifier[dict] ([( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[params] . identifier[items] () keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] ])
identifier[url] += literal[string] . identifier[format] ( identifier[urlencode] ( identifier[params] ))
identifier[options] ={
literal[string] : identifier[content_type] ,
literal[string] : identifier[inactivity_timeout] ,
literal[string] : identifier[interim_results] ,
literal[string] : identifier[keywords] ,
literal[string] : identifier[keywords_threshold] ,
literal[string] : identifier[max_alternatives] ,
literal[string] : identifier[word_alternatives_threshold] ,
literal[string] : identifier[word_confidence] ,
literal[string] : identifier[timestamps] ,
literal[string] : identifier[profanity_filter] ,
literal[string] : identifier[smart_formatting] ,
literal[string] : identifier[speaker_labels] ,
literal[string] : identifier[grammar_name] ,
literal[string] : identifier[redaction]
}
identifier[options] = identifier[dict] ([( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[options] . identifier[items] () keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] ])
identifier[RecognizeListener] ( identifier[audio] ,
identifier[options] ,
identifier[recognize_callback] ,
identifier[url] ,
identifier[headers] ,
identifier[http_proxy_host] ,
identifier[http_proxy_port] ,
identifier[self] . identifier[verify] ) | def recognize_using_websocket(self, audio, content_type, recognize_callback, model=None, language_customization_id=None, acoustic_customization_id=None, customization_weight=None, base_model_version=None, inactivity_timeout=None, interim_results=None, keywords=None, keywords_threshold=None, max_alternatives=None, word_alternatives_threshold=None, word_confidence=None, timestamps=None, profanity_filter=None, smart_formatting=None, speaker_labels=None, http_proxy_host=None, http_proxy_port=None, customization_id=None, grammar_name=None, redaction=None, **kwargs):
"""
Sends audio for speech recognition using web sockets.
:param AudioSource audio: The audio to transcribe in the format specified by the
`Content-Type` header.
:param str content_type: The type of the input: audio/basic, audio/flac,
audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus,
audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or
audio/webm;codecs=vorbis.
:param RecognizeCallback recognize_callback: The callback method for the websocket.
:param str model: The identifier of the model that is to be used for the
recognition request or, for the **Create a session** method, with the new session.
:param str language_customization_id: The customization ID (GUID) of a custom
language model that is to be used with the recognition request. The base model of
the specified custom language model must match the model specified with the
`model` parameter. You must make the request with service credentials created for
the instance of the service that owns the custom model. By default, no custom
language model is used. See [Custom
models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom).
**Note:** Use this parameter instead of the deprecated `customization_id`
parameter.
:param str acoustic_customization_id: The customization ID (GUID) of a custom
acoustic model that is to be used with the recognition request or, for the
**Create a session** method, with the new session. The base model of the specified
custom acoustic model must match the model specified with the `model` parameter.
You must make the request with service credentials created for the instance of the
service that owns the custom model. By default, no custom acoustic model is used.
:param float customization_weight: If you specify the customization ID (GUID) of a
custom language model with the recognition request or, for sessions, with the
**Create a session** method, the customization weight tells the service how much
weight to give to words from the custom language model compared to those from the
base model for the current request.
Specify a value between 0.0 and 1.0. Unless a different customization weight was
specified for the custom model when it was trained, the default value is 0.3. A
customization weight that you specify overrides a weight that was specified when
the custom model was trained.
The default value yields the best performance in general. Assign a higher value if
your audio makes frequent use of OOV words from the custom model. Use caution when
setting the weight: a higher value can improve the accuracy of phrases from the
custom model's domain, but it can negatively affect performance on non-domain
phrases.
:param str base_model_version: The version of the specified base model that is to
be used with recognition request or, for the **Create a session** method, with the
new session. Multiple versions of a base model can exist when a model is updated
for internal improvements. The parameter is intended primarily for use with custom
models that have been upgraded for a new base model. The default value depends on
whether the parameter is used with or without a custom model. For more
information, see [Base model
version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version).
:param int inactivity_timeout: The time in seconds after which, if only silence
(no speech) is detected in submitted audio, the connection is closed with a 400
error. Useful for stopping audio submission from a live microphone when a user
simply walks away. Use `-1` for infinity.
:param list[str] keywords: An array of keyword strings to spot in the audio. Each
keyword string can include one or more tokens. Keywords are spotted only in the
final hypothesis, not in interim results. If you specify any keywords, you must
also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit
the parameter or specify an empty array if you do not need to spot keywords.
:param float keywords_threshold: A confidence value that is the lower bound for
spotting a keyword. A word is considered to match a keyword if its confidence is
greater than or equal to the threshold. Specify a probability between 0 and 1
inclusive. No keyword spotting is performed if you omit the parameter. If you
specify a threshold, you must also specify one or more keywords.
:param int max_alternatives: The maximum number of alternative transcripts to be
returned. By default, a single transcription is returned.
:param float word_alternatives_threshold: A confidence value that is the lower
bound for identifying a hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its confidence is
greater than or equal to the threshold. Specify a probability between 0 and 1
inclusive. No alternative words are computed if you omit the parameter.
:param bool word_confidence: If `true`, a confidence measure in the range of 0 to
1 is returned for each word. By default, no word confidence measures are returned.
:param bool timestamps: If `true`, time alignment is returned for each word. By
default, no timestamps are returned.
:param bool profanity_filter: If `true` (the default), filters profanity from all
output except for keyword results by replacing inappropriate words with a series
of asterisks. Set the parameter to `false` to return results with no censoring.
Applies to US English transcription only.
:param bool smart_formatting: If `true`, converts dates, times, series of digits
and numbers, phone numbers, currency values, and internet addresses into more
readable, conventional representations in the final transcript of a recognition
request. For US English, also converts certain keyword strings to punctuation
symbols. By default, no smart formatting is performed. Applies to US English and
Spanish transcription only.
:param bool speaker_labels: If `true`, the response includes labels that identify
which words were spoken by which participants in a multi-person exchange. By
default, no speaker labels are returned. Setting `speaker_labels` to `true` forces
the `timestamps` parameter to be `true`, regardless of whether you specify `false`
for the parameter.
To determine whether a language model supports speaker labels, use the **Get
models** method and check that the attribute `speaker_labels` is set to `true`.
You can also refer to [Speaker
labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels).
:param str http_proxy_host: http proxy host name.
:param str http_proxy_port: http proxy port. If not set, set to 80.
:param str customization_id: **Deprecated.** Use the `language_customization_id`
parameter to specify the customization ID (GUID) of a custom language model that
is to be used with the recognition request. Do not specify both parameters with a
request.
:param str grammar_name: The name of a grammar that is to be used with the
recognition request. If you specify a grammar, you must also use the
`language_customization_id` parameter to specify the name of the custom language
model for which the grammar is defined. The service recognizes only strings that
are recognized by the specified grammar; it does not recognize other custom words
from the model's words resource. See
[Grammars](https://cloud.ibm.com/docs/services/speech-to-text/output.html).
:param bool redaction: If `true`, the service redacts, or masks, numeric data from
final transcripts. The feature redacts any number that has three or more
consecutive digits by replacing each digit with an `X` character. It is intended
to redact sensitive numeric data, such as credit card numbers. By default, the
service performs no redaction.
When you enable redaction, the service automatically enables smart formatting,
regardless of whether you explicitly disable that feature. To ensure maximum
security, the service also disables keyword spotting (ignores the `keywords` and
`keywords_threshold` parameters) and returns only a single final transcript
(forces the `max_alternatives` parameter to be `1`).
**Note:** Applies to US English, Japanese, and Korean transcription only.
See [Numeric
redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction).
:param dict headers: A `dict` containing the request headers
:return: A `dict` containing the `SpeechRecognitionResults` response.
:rtype: dict
"""
if audio is None:
raise ValueError('audio must be provided') # depends on [control=['if'], data=[]]
if not isinstance(audio, AudioSource):
raise Exception('audio is not of type AudioSource. Import the class from ibm_watson.websocket') # depends on [control=['if'], data=[]]
if content_type is None:
raise ValueError('content_type must be provided') # depends on [control=['if'], data=[]]
if recognize_callback is None:
raise ValueError('recognize_callback must be provided') # depends on [control=['if'], data=[]]
if not isinstance(recognize_callback, RecognizeCallback):
raise Exception('Callback is not a derived class of RecognizeCallback') # depends on [control=['if'], data=[]]
headers = {}
if self.default_headers is not None:
headers = self.default_headers.copy() # depends on [control=['if'], data=[]]
if 'headers' in kwargs:
headers.update(kwargs.get('headers')) # depends on [control=['if'], data=['kwargs']]
if self.token_manager:
access_token = self.token_manager.get_token()
headers['Authorization'] = '{0} {1}'.format(BEARER, access_token) # depends on [control=['if'], data=[]]
else:
authstring = '{0}:{1}'.format(self.username, self.password)
base64_authorization = base64.b64encode(authstring.encode('utf-8')).decode('utf-8')
headers['Authorization'] = 'Basic {0}'.format(base64_authorization)
url = self.url.replace('https:', 'wss:')
params = {'model': model, 'customization_id': customization_id, 'acoustic_customization_id': acoustic_customization_id, 'customization_weight': customization_weight, 'base_model_version': base_model_version, 'language_customization_id': language_customization_id}
params = dict([(k, v) for (k, v) in params.items() if v is not None])
url += '/v1/recognize?{0}'.format(urlencode(params))
options = {'content_type': content_type, 'inactivity_timeout': inactivity_timeout, 'interim_results': interim_results, 'keywords': keywords, 'keywords_threshold': keywords_threshold, 'max_alternatives': max_alternatives, 'word_alternatives_threshold': word_alternatives_threshold, 'word_confidence': word_confidence, 'timestamps': timestamps, 'profanity_filter': profanity_filter, 'smart_formatting': smart_formatting, 'speaker_labels': speaker_labels, 'grammar_name': grammar_name, 'redaction': redaction}
options = dict([(k, v) for (k, v) in options.items() if v is not None])
RecognizeListener(audio, options, recognize_callback, url, headers, http_proxy_host, http_proxy_port, self.verify) |
def get_sudoers_entry(username=None, sudoers_entries=None):
""" Find the sudoers entry in the sudoers file for the specified user.
args:
username (str): username.
sudoers_entries (list): list of lines from the sudoers file.
returns:
str: the user's sudoers entry with the leading username removed
and surrounding whitespace stripped, or None if no line matches.
"""
# NOTE(review): both arguments default to None, but iterating a None
# sudoers_entries raises TypeError -- callers are expected to pass a list.
for entry in sudoers_entries:
# A line belongs to the user when it starts with the username itself.
if entry.startswith(username):
return entry.replace(username, '').strip() | def function[get_sudoers_entry, parameter[username, sudoers_entries]]:
constant[ Find the sudoers entry in the sudoers file for the specified user.
args:
username (str): username.
sudoers_entries (list): list of lines from the sudoers file.
returns:`r
str: sudoers entry for the specified user.
]
for taget[name[entry]] in starred[name[sudoers_entries]] begin[:]
if call[name[entry].startswith, parameter[name[username]]] begin[:]
return[call[call[name[entry].replace, parameter[name[username], constant[]]].strip, parameter[]]] | keyword[def] identifier[get_sudoers_entry] ( identifier[username] = keyword[None] , identifier[sudoers_entries] = keyword[None] ):
literal[string]
keyword[for] identifier[entry] keyword[in] identifier[sudoers_entries] :
keyword[if] identifier[entry] . identifier[startswith] ( identifier[username] ):
keyword[return] identifier[entry] . identifier[replace] ( identifier[username] , literal[string] ). identifier[strip] () | def get_sudoers_entry(username=None, sudoers_entries=None):
""" Find the sudoers entry in the sudoers file for the specified user.
args:
username (str): username.
sudoers_entries (list): list of lines from the sudoers file.
returns:`r
str: sudoers entry for the specified user.
"""
for entry in sudoers_entries:
if entry.startswith(username):
return entry.replace(username, '').strip() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']] |
def input_(data, field_path):
"""Return a hydrated value of the ``input`` field.
``data`` is a mapping carrying the target object's id under the
``'__id'`` key; ``field_path`` is the dotted path selecting the value
inside the hydrated inputs.
"""
# Resolve the Data object referenced by the '__id' key.
data_obj = Data.objects.get(id=data['__id'])
# Deep-copy so hydration below cannot mutate the stored input document.
inputs = copy.deepcopy(data_obj.input)
# XXX: Optimize by hydrating only the required field (major refactoring).
# Presumably these hydrate `inputs` in place against the process schema
# (references first, then uploaded files) -- TODO confirm with their defs.
hydrate_input_references(inputs, data_obj.process.input_schema)
hydrate_input_uploads(inputs, data_obj.process.input_schema)
return dict_dot(inputs, field_path) | def function[input_, parameter[data, field_path]]:
constant[Return a hydrated value of the ``input`` field.]
variable[data_obj] assign[=] call[name[Data].objects.get, parameter[]]
variable[inputs] assign[=] call[name[copy].deepcopy, parameter[name[data_obj].input]]
call[name[hydrate_input_references], parameter[name[inputs], name[data_obj].process.input_schema]]
call[name[hydrate_input_uploads], parameter[name[inputs], name[data_obj].process.input_schema]]
return[call[name[dict_dot], parameter[name[inputs], name[field_path]]]] | keyword[def] identifier[input_] ( identifier[data] , identifier[field_path] ):
literal[string]
identifier[data_obj] = identifier[Data] . identifier[objects] . identifier[get] ( identifier[id] = identifier[data] [ literal[string] ])
identifier[inputs] = identifier[copy] . identifier[deepcopy] ( identifier[data_obj] . identifier[input] )
identifier[hydrate_input_references] ( identifier[inputs] , identifier[data_obj] . identifier[process] . identifier[input_schema] )
identifier[hydrate_input_uploads] ( identifier[inputs] , identifier[data_obj] . identifier[process] . identifier[input_schema] )
keyword[return] identifier[dict_dot] ( identifier[inputs] , identifier[field_path] ) | def input_(data, field_path):
"""Return a hydrated value of the ``input`` field."""
data_obj = Data.objects.get(id=data['__id'])
inputs = copy.deepcopy(data_obj.input)
# XXX: Optimize by hydrating only the required field (major refactoring).
hydrate_input_references(inputs, data_obj.process.input_schema)
hydrate_input_uploads(inputs, data_obj.process.input_schema)
return dict_dot(inputs, field_path) |
def create_csr(path=None, text=False, **kwargs):
'''
Create a certificate signing request.
path:
Path to write the certificate to.
text:
If ``True``, return the PEM text without writing to a file.
Default ``False``.
algorithm:
The hashing algorithm to be used for signing this request. Defaults to sha256.
kwargs:
The subject, extension and version arguments from
:mod:`x509.create_certificate <salt.modules.x509.create_certificate>`
can be used.
ext_mapping:
Provide additional X509v3 extension mappings. This argument should be
in the form of a dictionary and should include both the OID and the
friendly name for the extension.
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt '*' x509.create_csr path=/etc/pki/myca.csr public_key='/etc/pki/myca.key' CN='My Cert'
'''
# Exactly one of `path` (write PEM to disk) or `text` (return PEM) is allowed.
if not path and not text:
raise salt.exceptions.SaltInvocationError(
'Either path or text must be specified.')
if path and text:
raise salt.exceptions.SaltInvocationError(
'Either path or text must be specified, not both.')
csr = M2Crypto.X509.Request()
subject = csr.get_subject()
# Backfill any defaults (version, algorithm, ...) the caller did not supply.
for prop, default in six.iteritems(CERT_DEFAULTS):
if prop not in kwargs:
kwargs[prop] = default
# X509 versions are zero-based on the wire (v1 == 0, v3 == 2).
csr.set_version(kwargs['version'] - 1)
# Modern OpenSSL refuses to work with unsigned CSRs, so fall back to
# using the public_key value as the signing key when none was given.
if 'private_key' not in kwargs and 'public_key' in kwargs:
kwargs['private_key'] = kwargs['public_key']
log.warning("OpenSSL no longer allows working with non-signed CSRs. "
"A private_key must be specified. Attempting to use public_key as private_key")
if 'private_key' not in kwargs:
raise salt.exceptions.SaltInvocationError('private_key is required')
# Conversely, derive the public key from the private key if it is missing.
if 'public_key' not in kwargs:
kwargs['public_key'] = kwargs['private_key']
if 'private_key_passphrase' not in kwargs:
kwargs['private_key_passphrase'] = None
if 'public_key_passphrase' not in kwargs:
kwargs['public_key_passphrase'] = None
# Share a single passphrase between both keys when only one was supplied.
if kwargs['public_key_passphrase'] and not kwargs['private_key_passphrase']:
kwargs['private_key_passphrase'] = kwargs['public_key_passphrase']
if kwargs['private_key_passphrase'] and not kwargs['public_key_passphrase']:
kwargs['public_key_passphrase'] = kwargs['private_key_passphrase']
csr.set_pubkey(get_public_key(kwargs['public_key'],
passphrase=kwargs['public_key_passphrase'], asObj=True))
# Copy any recognized subject fields (CN, O, ...) from kwargs onto the CSR.
# pylint: disable=unused-variable
for entry, num in six.iteritems(subject.nid):
if entry in kwargs:
setattr(subject, entry, kwargs[entry])
# pylint: enable=unused-variable
# NOTE(review): this mutates the module-level EXT_NAME_MAPPINGS, so custom
# mappings leak into subsequent calls -- confirm whether that is intended.
if 'ext_mapping' in kwargs:
EXT_NAME_MAPPINGS.update(kwargs['ext_mapping'])
extstack = M2Crypto.X509.X509_Extension_Stack()
for extname, extlongname in six.iteritems(EXT_NAME_MAPPINGS):
# An extension may be requested under either its short or its long name.
if extname not in kwargs and extlongname not in kwargs:
continue
extval = kwargs.get(extname, None) or kwargs.get(extlongname, None)
critical = False
# A leading 'critical ' marker (9 chars, stripped below) flags the
# extension as critical.
if extval.startswith('critical '):
critical = True
extval = extval[9:]
# Substitute the literal 'hash' token with the actual public-key hash.
if extname == 'subjectKeyIdentifier' and 'hash' in extval:
extval = extval.replace('hash', _get_pubkey_hash(csr))
# Rewrite the OpenSSL display form 'IP Address' to 'IP' before building
# the extension value.
if extname == 'subjectAltName':
extval = extval.replace('IP Address', 'IP')
# authorityKeyIdentifier is skipped entirely for requests -- presumably
# because a CSR has no issuer to key against; confirm.
if extname == 'authorityKeyIdentifier':
continue
issuer = None
ext = _new_extension(
name=extname, value=extval, critical=critical, issuer=issuer)
# Skip (but log) values that could not be turned into a real extension.
if not ext.x509_ext:
log.info('Invalid X509v3 Extension. %s: %s', extname, extval)
continue
extstack.push(ext)
csr.add_extensions(extstack)
# Sign the request with the private key using the requested digest.
csr.sign(_get_private_key_obj(kwargs['private_key'],
passphrase=kwargs['private_key_passphrase']), kwargs['algorithm'])
return write_pem(text=csr.as_pem(), path=path, pem_type='CERTIFICATE REQUEST') if path else csr.as_pem() | def function[create_csr, parameter[path, text]]:
constant[
Create a certificate signing request.
path:
Path to write the certificate to.
text:
If ``True``, return the PEM text without writing to a file.
Default ``False``.
algorithm:
The hashing algorithm to be used for signing this request. Defaults to sha256.
kwargs:
The subject, extension and version arguments from
:mod:`x509.create_certificate <salt.modules.x509.create_certificate>`
can be used.
ext_mapping:
Provide additional X509v3 extension mappings. This argument should be
in the form of a dictionary and should include both the OID and the
friendly name for the extension.
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt '*' x509.create_csr path=/etc/pki/myca.csr public_key='/etc/pki/myca.key' CN='My Cert'
]
if <ast.BoolOp object at 0x7da1b21fa5c0> begin[:]
<ast.Raise object at 0x7da1b21fa1d0>
if <ast.BoolOp object at 0x7da1b21f94b0> begin[:]
<ast.Raise object at 0x7da1b21f86d0>
variable[csr] assign[=] call[name[M2Crypto].X509.Request, parameter[]]
variable[subject] assign[=] call[name[csr].get_subject, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b21f8220>, <ast.Name object at 0x7da1b21f9ab0>]]] in starred[call[name[six].iteritems, parameter[name[CERT_DEFAULTS]]]] begin[:]
if compare[name[prop] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][name[prop]] assign[=] name[default]
call[name[csr].set_version, parameter[binary_operation[call[name[kwargs]][constant[version]] - constant[1]]]]
if <ast.BoolOp object at 0x7da1b21f89d0> begin[:]
call[name[kwargs]][constant[private_key]] assign[=] call[name[kwargs]][constant[public_key]]
call[name[log].warning, parameter[constant[OpenSSL no longer allows working with non-signed CSRs. A private_key must be specified. Attempting to use public_key as private_key]]]
if compare[constant[private_key] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
<ast.Raise object at 0x7da1b21f9c90>
if compare[constant[public_key] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[public_key]] assign[=] call[name[kwargs]][constant[private_key]]
if compare[constant[private_key_passphrase] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[private_key_passphrase]] assign[=] constant[None]
if compare[constant[public_key_passphrase] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[public_key_passphrase]] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b21fa260> begin[:]
call[name[kwargs]][constant[private_key_passphrase]] assign[=] call[name[kwargs]][constant[public_key_passphrase]]
if <ast.BoolOp object at 0x7da1b21f8a30> begin[:]
call[name[kwargs]][constant[public_key_passphrase]] assign[=] call[name[kwargs]][constant[private_key_passphrase]]
call[name[csr].set_pubkey, parameter[call[name[get_public_key], parameter[call[name[kwargs]][constant[public_key]]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b21f83d0>, <ast.Name object at 0x7da1b21f9f60>]]] in starred[call[name[six].iteritems, parameter[name[subject].nid]]] begin[:]
if compare[name[entry] in name[kwargs]] begin[:]
call[name[setattr], parameter[name[subject], name[entry], call[name[kwargs]][name[entry]]]]
if compare[constant[ext_mapping] in name[kwargs]] begin[:]
call[name[EXT_NAME_MAPPINGS].update, parameter[call[name[kwargs]][constant[ext_mapping]]]]
variable[extstack] assign[=] call[name[M2Crypto].X509.X509_Extension_Stack, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da207f9b580>, <ast.Name object at 0x7da207f9a6b0>]]] in starred[call[name[six].iteritems, parameter[name[EXT_NAME_MAPPINGS]]]] begin[:]
if <ast.BoolOp object at 0x7da207f992d0> begin[:]
continue
variable[extval] assign[=] <ast.BoolOp object at 0x7da207f9b760>
variable[critical] assign[=] constant[False]
if call[name[extval].startswith, parameter[constant[critical ]]] begin[:]
variable[critical] assign[=] constant[True]
variable[extval] assign[=] call[name[extval]][<ast.Slice object at 0x7da207f99120>]
if <ast.BoolOp object at 0x7da207f9aad0> begin[:]
variable[extval] assign[=] call[name[extval].replace, parameter[constant[hash], call[name[_get_pubkey_hash], parameter[name[csr]]]]]
if compare[name[extname] equal[==] constant[subjectAltName]] begin[:]
variable[extval] assign[=] call[name[extval].replace, parameter[constant[IP Address], constant[IP]]]
if compare[name[extname] equal[==] constant[authorityKeyIdentifier]] begin[:]
continue
variable[issuer] assign[=] constant[None]
variable[ext] assign[=] call[name[_new_extension], parameter[]]
if <ast.UnaryOp object at 0x7da207f98cd0> begin[:]
call[name[log].info, parameter[constant[Invalid X509v3 Extension. %s: %s], name[extname], name[extval]]]
continue
call[name[extstack].push, parameter[name[ext]]]
call[name[csr].add_extensions, parameter[name[extstack]]]
call[name[csr].sign, parameter[call[name[_get_private_key_obj], parameter[call[name[kwargs]][constant[private_key]]]], call[name[kwargs]][constant[algorithm]]]]
return[<ast.IfExp object at 0x7da207f9aef0>] | keyword[def] identifier[create_csr] ( identifier[path] = keyword[None] , identifier[text] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[path] keyword[and] keyword[not] identifier[text] :
keyword[raise] identifier[salt] . identifier[exceptions] . identifier[SaltInvocationError] (
literal[string] )
keyword[if] identifier[path] keyword[and] identifier[text] :
keyword[raise] identifier[salt] . identifier[exceptions] . identifier[SaltInvocationError] (
literal[string] )
identifier[csr] = identifier[M2Crypto] . identifier[X509] . identifier[Request] ()
identifier[subject] = identifier[csr] . identifier[get_subject] ()
keyword[for] identifier[prop] , identifier[default] keyword[in] identifier[six] . identifier[iteritems] ( identifier[CERT_DEFAULTS] ):
keyword[if] identifier[prop] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ identifier[prop] ]= identifier[default]
identifier[csr] . identifier[set_version] ( identifier[kwargs] [ literal[string] ]- literal[int] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] keyword[and] literal[string] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[kwargs] [ literal[string] ]
identifier[log] . identifier[warning] ( literal[string]
literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
keyword[raise] identifier[salt] . identifier[exceptions] . identifier[SaltInvocationError] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[kwargs] [ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= keyword[None]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= keyword[None]
keyword[if] identifier[kwargs] [ literal[string] ] keyword[and] keyword[not] identifier[kwargs] [ literal[string] ]:
identifier[kwargs] [ literal[string] ]= identifier[kwargs] [ literal[string] ]
keyword[if] identifier[kwargs] [ literal[string] ] keyword[and] keyword[not] identifier[kwargs] [ literal[string] ]:
identifier[kwargs] [ literal[string] ]= identifier[kwargs] [ literal[string] ]
identifier[csr] . identifier[set_pubkey] ( identifier[get_public_key] ( identifier[kwargs] [ literal[string] ],
identifier[passphrase] = identifier[kwargs] [ literal[string] ], identifier[asObj] = keyword[True] ))
keyword[for] identifier[entry] , identifier[num] keyword[in] identifier[six] . identifier[iteritems] ( identifier[subject] . identifier[nid] ):
keyword[if] identifier[entry] keyword[in] identifier[kwargs] :
identifier[setattr] ( identifier[subject] , identifier[entry] , identifier[kwargs] [ identifier[entry] ])
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[EXT_NAME_MAPPINGS] . identifier[update] ( identifier[kwargs] [ literal[string] ])
identifier[extstack] = identifier[M2Crypto] . identifier[X509] . identifier[X509_Extension_Stack] ()
keyword[for] identifier[extname] , identifier[extlongname] keyword[in] identifier[six] . identifier[iteritems] ( identifier[EXT_NAME_MAPPINGS] ):
keyword[if] identifier[extname] keyword[not] keyword[in] identifier[kwargs] keyword[and] identifier[extlongname] keyword[not] keyword[in] identifier[kwargs] :
keyword[continue]
identifier[extval] = identifier[kwargs] . identifier[get] ( identifier[extname] , keyword[None] ) keyword[or] identifier[kwargs] . identifier[get] ( identifier[extlongname] , keyword[None] )
identifier[critical] = keyword[False]
keyword[if] identifier[extval] . identifier[startswith] ( literal[string] ):
identifier[critical] = keyword[True]
identifier[extval] = identifier[extval] [ literal[int] :]
keyword[if] identifier[extname] == literal[string] keyword[and] literal[string] keyword[in] identifier[extval] :
identifier[extval] = identifier[extval] . identifier[replace] ( literal[string] , identifier[_get_pubkey_hash] ( identifier[csr] ))
keyword[if] identifier[extname] == literal[string] :
identifier[extval] = identifier[extval] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[extname] == literal[string] :
keyword[continue]
identifier[issuer] = keyword[None]
identifier[ext] = identifier[_new_extension] (
identifier[name] = identifier[extname] , identifier[value] = identifier[extval] , identifier[critical] = identifier[critical] , identifier[issuer] = identifier[issuer] )
keyword[if] keyword[not] identifier[ext] . identifier[x509_ext] :
identifier[log] . identifier[info] ( literal[string] , identifier[extname] , identifier[extval] )
keyword[continue]
identifier[extstack] . identifier[push] ( identifier[ext] )
identifier[csr] . identifier[add_extensions] ( identifier[extstack] )
identifier[csr] . identifier[sign] ( identifier[_get_private_key_obj] ( identifier[kwargs] [ literal[string] ],
identifier[passphrase] = identifier[kwargs] [ literal[string] ]), identifier[kwargs] [ literal[string] ])
keyword[return] identifier[write_pem] ( identifier[text] = identifier[csr] . identifier[as_pem] (), identifier[path] = identifier[path] , identifier[pem_type] = literal[string] ) keyword[if] identifier[path] keyword[else] identifier[csr] . identifier[as_pem] () | def create_csr(path=None, text=False, **kwargs):
"""
Create a certificate signing request.
path:
Path to write the certificate to.
text:
If ``True``, return the PEM text without writing to a file.
Default ``False``.
algorithm:
The hashing algorithm to be used for signing this request. Defaults to sha256.
kwargs:
The subject, extension and version arguments from
:mod:`x509.create_certificate <salt.modules.x509.create_certificate>`
can be used.
ext_mapping:
Provide additional X509v3 extension mappings. This argument should be
in the form of a dictionary and should include both the OID and the
friendly name for the extension.
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt '*' x509.create_csr path=/etc/pki/myca.csr public_key='/etc/pki/myca.key' CN='My Cert'
"""
if not path and (not text):
raise salt.exceptions.SaltInvocationError('Either path or text must be specified.') # depends on [control=['if'], data=[]]
if path and text:
raise salt.exceptions.SaltInvocationError('Either path or text must be specified, not both.') # depends on [control=['if'], data=[]]
csr = M2Crypto.X509.Request()
subject = csr.get_subject()
for (prop, default) in six.iteritems(CERT_DEFAULTS):
if prop not in kwargs:
kwargs[prop] = default # depends on [control=['if'], data=['prop', 'kwargs']] # depends on [control=['for'], data=[]]
csr.set_version(kwargs['version'] - 1)
if 'private_key' not in kwargs and 'public_key' in kwargs:
kwargs['private_key'] = kwargs['public_key']
log.warning('OpenSSL no longer allows working with non-signed CSRs. A private_key must be specified. Attempting to use public_key as private_key') # depends on [control=['if'], data=[]]
if 'private_key' not in kwargs:
raise salt.exceptions.SaltInvocationError('private_key is required') # depends on [control=['if'], data=[]]
if 'public_key' not in kwargs:
kwargs['public_key'] = kwargs['private_key'] # depends on [control=['if'], data=['kwargs']]
if 'private_key_passphrase' not in kwargs:
kwargs['private_key_passphrase'] = None # depends on [control=['if'], data=['kwargs']]
if 'public_key_passphrase' not in kwargs:
kwargs['public_key_passphrase'] = None # depends on [control=['if'], data=['kwargs']]
if kwargs['public_key_passphrase'] and (not kwargs['private_key_passphrase']):
kwargs['private_key_passphrase'] = kwargs['public_key_passphrase'] # depends on [control=['if'], data=[]]
if kwargs['private_key_passphrase'] and (not kwargs['public_key_passphrase']):
kwargs['public_key_passphrase'] = kwargs['private_key_passphrase'] # depends on [control=['if'], data=[]]
csr.set_pubkey(get_public_key(kwargs['public_key'], passphrase=kwargs['public_key_passphrase'], asObj=True))
# pylint: disable=unused-variable
for (entry, num) in six.iteritems(subject.nid):
if entry in kwargs:
setattr(subject, entry, kwargs[entry]) # depends on [control=['if'], data=['entry', 'kwargs']] # depends on [control=['for'], data=[]]
# pylint: enable=unused-variable
if 'ext_mapping' in kwargs:
EXT_NAME_MAPPINGS.update(kwargs['ext_mapping']) # depends on [control=['if'], data=['kwargs']]
extstack = M2Crypto.X509.X509_Extension_Stack()
for (extname, extlongname) in six.iteritems(EXT_NAME_MAPPINGS):
if extname not in kwargs and extlongname not in kwargs:
continue # depends on [control=['if'], data=[]]
extval = kwargs.get(extname, None) or kwargs.get(extlongname, None)
critical = False
if extval.startswith('critical '):
critical = True
extval = extval[9:] # depends on [control=['if'], data=[]]
if extname == 'subjectKeyIdentifier' and 'hash' in extval:
extval = extval.replace('hash', _get_pubkey_hash(csr)) # depends on [control=['if'], data=[]]
if extname == 'subjectAltName':
extval = extval.replace('IP Address', 'IP') # depends on [control=['if'], data=[]]
if extname == 'authorityKeyIdentifier':
continue # depends on [control=['if'], data=[]]
issuer = None
ext = _new_extension(name=extname, value=extval, critical=critical, issuer=issuer)
if not ext.x509_ext:
log.info('Invalid X509v3 Extension. %s: %s', extname, extval)
continue # depends on [control=['if'], data=[]]
extstack.push(ext) # depends on [control=['for'], data=[]]
csr.add_extensions(extstack)
csr.sign(_get_private_key_obj(kwargs['private_key'], passphrase=kwargs['private_key_passphrase']), kwargs['algorithm'])
return write_pem(text=csr.as_pem(), path=path, pem_type='CERTIFICATE REQUEST') if path else csr.as_pem() |
def trans_new(name, transform, inverse, breaks=None,
minor_breaks=None, _format=None,
domain=(-np.inf, np.inf), doc='', **kwargs):
"""
Create a transformation class object
Parameters
----------
name : str
Name of the transformation
transform : callable ``f(x)``
A function (preferably a `ufunc`) that computes
the transformation.
inverse : callable ``f(x)``
A function (preferably a `ufunc`) that computes
the inverse of the transformation.
breaks : callable ``f(limits)``
Function to compute the breaks for this transform.
If None, then a default good enough for a linear
domain is used.
minor_breaks : callable ``f(major, limits)``
Function to compute the minor breaks for this
transform. If None, then a default good enough for
a linear domain is used.
_format : callable ``f(breaks)``
Function to format the generated breaks.
domain : array_like
Domain over which the transformation is valid.
It should be of length 2.
doc : str
Docstring for the class.
**kwargs : dict
Attributes of the transform, e.g. if base is passed
in kwargs, then `t.base` would be a valid attribute.
Returns
-------
out : trans
Transform class
"""
def _get(func):
# Wrap plain callables as staticmethods so they do not become bound
# methods on the generated class; already-wrapped descriptors and
# bound methods pass through untouched.
if isinstance(func, (classmethod, staticmethod, MethodType)):
return func
else:
return staticmethod(func)
klass_name = '{}_trans'.format(name)
# Class dictionary for the generated subclass; extra kwargs become plain
# class attributes (e.g. `base` for a log transform).
d = {'transform': _get(transform),
'inverse': _get(inverse),
'domain': domain,
'__doc__': doc,
**kwargs}
# Optional overrides; when omitted, the defaults defined on the `trans`
# base class are inherited.
if breaks:
d['breaks_'] = _get(breaks)
if minor_breaks:
d['minor_breaks'] = _get(minor_breaks)
if _format:
d['format'] = _get(_format)
return type(klass_name, (trans,), d) | def function[trans_new, parameter[name, transform, inverse, breaks, minor_breaks, _format, domain, doc]]:
constant[
Create a transformation class object
Parameters
----------
name : str
Name of the transformation
transform : callable ``f(x)``
A function (preferably a `ufunc`) that computes
the transformation.
inverse : callable ``f(x)``
A function (preferably a `ufunc`) that computes
the inverse of the transformation.
breaks : callable ``f(limits)``
Function to compute the breaks for this transform.
If None, then a default good enough for a linear
domain is used.
minor_breaks : callable ``f(major, limits)``
Function to compute the minor breaks for this
transform. If None, then a default good enough for
a linear domain is used.
_format : callable ``f(breaks)``
Function to format the generated breaks.
domain : array_like
Domain over which the transformation is valid.
It should be of length 2.
doc : str
Docstring for the class.
**kwargs : dict
Attributes of the transform, e.g if base is passed
in kwargs, then `t.base` would be a valied attribute.
Returns
-------
out : trans
Transform class
]
def function[_get, parameter[func]]:
if call[name[isinstance], parameter[name[func], tuple[[<ast.Name object at 0x7da1b00f7730>, <ast.Name object at 0x7da1b00f4970>, <ast.Name object at 0x7da1b00f4a60>]]]] begin[:]
return[name[func]]
variable[klass_name] assign[=] call[constant[{}_trans].format, parameter[name[name]]]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da1b00f73a0>, <ast.Constant object at 0x7da1b00f7b50>, <ast.Constant object at 0x7da1b00f5a50>, <ast.Constant object at 0x7da1b00f4cd0>, None], [<ast.Call object at 0x7da1b00f73d0>, <ast.Call object at 0x7da1b00f5810>, <ast.Name object at 0x7da1b00f62f0>, <ast.Name object at 0x7da1b00f4250>, <ast.Name object at 0x7da1b00f4130>]]
if name[breaks] begin[:]
call[name[d]][constant[breaks_]] assign[=] call[name[_get], parameter[name[breaks]]]
if name[minor_breaks] begin[:]
call[name[d]][constant[minor_breaks]] assign[=] call[name[_get], parameter[name[minor_breaks]]]
if name[_format] begin[:]
call[name[d]][constant[format]] assign[=] call[name[_get], parameter[name[_format]]]
return[call[name[type], parameter[name[klass_name], tuple[[<ast.Name object at 0x7da1b00f5b40>]], name[d]]]] | keyword[def] identifier[trans_new] ( identifier[name] , identifier[transform] , identifier[inverse] , identifier[breaks] = keyword[None] ,
identifier[minor_breaks] = keyword[None] , identifier[_format] = keyword[None] ,
identifier[domain] =(- identifier[np] . identifier[inf] , identifier[np] . identifier[inf] ), identifier[doc] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[_get] ( identifier[func] ):
keyword[if] identifier[isinstance] ( identifier[func] ,( identifier[classmethod] , identifier[staticmethod] , identifier[MethodType] )):
keyword[return] identifier[func]
keyword[else] :
keyword[return] identifier[staticmethod] ( identifier[func] )
identifier[klass_name] = literal[string] . identifier[format] ( identifier[name] )
identifier[d] ={ literal[string] : identifier[_get] ( identifier[transform] ),
literal[string] : identifier[_get] ( identifier[inverse] ),
literal[string] : identifier[domain] ,
literal[string] : identifier[doc] ,
** identifier[kwargs] }
keyword[if] identifier[breaks] :
identifier[d] [ literal[string] ]= identifier[_get] ( identifier[breaks] )
keyword[if] identifier[minor_breaks] :
identifier[d] [ literal[string] ]= identifier[_get] ( identifier[minor_breaks] )
keyword[if] identifier[_format] :
identifier[d] [ literal[string] ]= identifier[_get] ( identifier[_format] )
keyword[return] identifier[type] ( identifier[klass_name] ,( identifier[trans] ,), identifier[d] ) | def trans_new(name, transform, inverse, breaks=None, minor_breaks=None, _format=None, domain=(-np.inf, np.inf), doc='', **kwargs):
"""
Create a transformation class object
Parameters
----------
name : str
Name of the transformation
transform : callable ``f(x)``
A function (preferably a `ufunc`) that computes
the transformation.
inverse : callable ``f(x)``
A function (preferably a `ufunc`) that computes
the inverse of the transformation.
breaks : callable ``f(limits)``
Function to compute the breaks for this transform.
If None, then a default good enough for a linear
domain is used.
minor_breaks : callable ``f(major, limits)``
Function to compute the minor breaks for this
transform. If None, then a default good enough for
a linear domain is used.
_format : callable ``f(breaks)``
Function to format the generated breaks.
domain : array_like
Domain over which the transformation is valid.
It should be of length 2.
doc : str
Docstring for the class.
**kwargs : dict
Attributes of the transform, e.g if base is passed
in kwargs, then `t.base` would be a valied attribute.
Returns
-------
out : trans
Transform class
"""
def _get(func):
if isinstance(func, (classmethod, staticmethod, MethodType)):
return func # depends on [control=['if'], data=[]]
else:
return staticmethod(func)
klass_name = '{}_trans'.format(name)
d = {'transform': _get(transform), 'inverse': _get(inverse), 'domain': domain, '__doc__': doc, **kwargs}
if breaks:
d['breaks_'] = _get(breaks) # depends on [control=['if'], data=[]]
if minor_breaks:
d['minor_breaks'] = _get(minor_breaks) # depends on [control=['if'], data=[]]
if _format:
d['format'] = _get(_format) # depends on [control=['if'], data=[]]
return type(klass_name, (trans,), d) |
def propagate_event_to_delegate(self, event, eventhandler):
"""Propagate the given Mouse event to the widgetdelegate
Enter edit mode, get the editor widget and issue an event on that widget.
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:param eventhandler: the eventhandler to use. E.g. ``"mousePressEvent"``
:type eventhandler: str
:returns: None
:rtype: None
:raises: None
"""
# if we are recursing because we sent a click event, and it got propagated to the parents
# and we recieve it again, terminate
if self.__recursing:
return
# find index at mouse position
i = self.index_at_event(event)
# if the index is not valid, we don't care
# handle it the default way
if not i.isValid():
return getattr(super(WidgetDelegateViewMixin, self), eventhandler)(event)
# get the widget delegate. if there is None, handle it the default way
delegate = self.itemDelegate(i)
if not isinstance(delegate, WidgetDelegate):
return getattr(super(WidgetDelegateViewMixin, self), eventhandler)(event)
# see if there is already a editor
widget = delegate.edit_widget(i)
if not widget:
# close all editors, then start editing
delegate.close_editors()
# Force editing. If in editing state, view will refuse editing.
if self.state() == self.EditingState:
self.setState(self.NoState)
self.edit(i)
# get the editor widget. if there is None, there is nothing to do so return
widget = delegate.edit_widget(i)
if not widget:
return getattr(super(WidgetDelegateViewMixin, self), eventhandler)(event)
# try to find the relative position to the widget
pid = self.get_pos_in_delegate(i, event.globalPos())
widgetatpos = widget.childAt(pid)
if widgetatpos:
widgettoclick = widgetatpos
g = widget.mapToGlobal(pid)
clickpos = widgettoclick.mapFromGlobal(g)
else:
widgettoclick = widget
clickpos = pid
# create a new event for the editor widget.
e = QtGui.QMouseEvent(event.type(),
clickpos,
event.button(),
event.buttons(),
event.modifiers())
# before we send, make sure, we cannot recurse
self.__recursing = True
try:
r = QtGui.QApplication.sendEvent(widgettoclick, e)
finally:
self.__recursing = False # out of the recursion. now we can accept click events again
return r | def function[propagate_event_to_delegate, parameter[self, event, eventhandler]]:
constant[Propagate the given Mouse event to the widgetdelegate
Enter edit mode, get the editor widget and issue an event on that widget.
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:param eventhandler: the eventhandler to use. E.g. ``"mousePressEvent"``
:type eventhandler: str
:returns: None
:rtype: None
:raises: None
]
if name[self].__recursing begin[:]
return[None]
variable[i] assign[=] call[name[self].index_at_event, parameter[name[event]]]
if <ast.UnaryOp object at 0x7da20e956050> begin[:]
return[call[call[name[getattr], parameter[call[name[super], parameter[name[WidgetDelegateViewMixin], name[self]]], name[eventhandler]]], parameter[name[event]]]]
variable[delegate] assign[=] call[name[self].itemDelegate, parameter[name[i]]]
if <ast.UnaryOp object at 0x7da20e954310> begin[:]
return[call[call[name[getattr], parameter[call[name[super], parameter[name[WidgetDelegateViewMixin], name[self]]], name[eventhandler]]], parameter[name[event]]]]
variable[widget] assign[=] call[name[delegate].edit_widget, parameter[name[i]]]
if <ast.UnaryOp object at 0x7da20e957f40> begin[:]
call[name[delegate].close_editors, parameter[]]
if compare[call[name[self].state, parameter[]] equal[==] name[self].EditingState] begin[:]
call[name[self].setState, parameter[name[self].NoState]]
call[name[self].edit, parameter[name[i]]]
variable[widget] assign[=] call[name[delegate].edit_widget, parameter[name[i]]]
if <ast.UnaryOp object at 0x7da20e954dc0> begin[:]
return[call[call[name[getattr], parameter[call[name[super], parameter[name[WidgetDelegateViewMixin], name[self]]], name[eventhandler]]], parameter[name[event]]]]
variable[pid] assign[=] call[name[self].get_pos_in_delegate, parameter[name[i], call[name[event].globalPos, parameter[]]]]
variable[widgetatpos] assign[=] call[name[widget].childAt, parameter[name[pid]]]
if name[widgetatpos] begin[:]
variable[widgettoclick] assign[=] name[widgetatpos]
variable[g] assign[=] call[name[widget].mapToGlobal, parameter[name[pid]]]
variable[clickpos] assign[=] call[name[widgettoclick].mapFromGlobal, parameter[name[g]]]
variable[e] assign[=] call[name[QtGui].QMouseEvent, parameter[call[name[event].type, parameter[]], name[clickpos], call[name[event].button, parameter[]], call[name[event].buttons, parameter[]], call[name[event].modifiers, parameter[]]]]
name[self].__recursing assign[=] constant[True]
<ast.Try object at 0x7da1b16692a0>
return[name[r]] | keyword[def] identifier[propagate_event_to_delegate] ( identifier[self] , identifier[event] , identifier[eventhandler] ):
literal[string]
keyword[if] identifier[self] . identifier[__recursing] :
keyword[return]
identifier[i] = identifier[self] . identifier[index_at_event] ( identifier[event] )
keyword[if] keyword[not] identifier[i] . identifier[isValid] ():
keyword[return] identifier[getattr] ( identifier[super] ( identifier[WidgetDelegateViewMixin] , identifier[self] ), identifier[eventhandler] )( identifier[event] )
identifier[delegate] = identifier[self] . identifier[itemDelegate] ( identifier[i] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[delegate] , identifier[WidgetDelegate] ):
keyword[return] identifier[getattr] ( identifier[super] ( identifier[WidgetDelegateViewMixin] , identifier[self] ), identifier[eventhandler] )( identifier[event] )
identifier[widget] = identifier[delegate] . identifier[edit_widget] ( identifier[i] )
keyword[if] keyword[not] identifier[widget] :
identifier[delegate] . identifier[close_editors] ()
keyword[if] identifier[self] . identifier[state] ()== identifier[self] . identifier[EditingState] :
identifier[self] . identifier[setState] ( identifier[self] . identifier[NoState] )
identifier[self] . identifier[edit] ( identifier[i] )
identifier[widget] = identifier[delegate] . identifier[edit_widget] ( identifier[i] )
keyword[if] keyword[not] identifier[widget] :
keyword[return] identifier[getattr] ( identifier[super] ( identifier[WidgetDelegateViewMixin] , identifier[self] ), identifier[eventhandler] )( identifier[event] )
identifier[pid] = identifier[self] . identifier[get_pos_in_delegate] ( identifier[i] , identifier[event] . identifier[globalPos] ())
identifier[widgetatpos] = identifier[widget] . identifier[childAt] ( identifier[pid] )
keyword[if] identifier[widgetatpos] :
identifier[widgettoclick] = identifier[widgetatpos]
identifier[g] = identifier[widget] . identifier[mapToGlobal] ( identifier[pid] )
identifier[clickpos] = identifier[widgettoclick] . identifier[mapFromGlobal] ( identifier[g] )
keyword[else] :
identifier[widgettoclick] = identifier[widget]
identifier[clickpos] = identifier[pid]
identifier[e] = identifier[QtGui] . identifier[QMouseEvent] ( identifier[event] . identifier[type] (),
identifier[clickpos] ,
identifier[event] . identifier[button] (),
identifier[event] . identifier[buttons] (),
identifier[event] . identifier[modifiers] ())
identifier[self] . identifier[__recursing] = keyword[True]
keyword[try] :
identifier[r] = identifier[QtGui] . identifier[QApplication] . identifier[sendEvent] ( identifier[widgettoclick] , identifier[e] )
keyword[finally] :
identifier[self] . identifier[__recursing] = keyword[False]
keyword[return] identifier[r] | def propagate_event_to_delegate(self, event, eventhandler):
"""Propagate the given Mouse event to the widgetdelegate
Enter edit mode, get the editor widget and issue an event on that widget.
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:param eventhandler: the eventhandler to use. E.g. ``"mousePressEvent"``
:type eventhandler: str
:returns: None
:rtype: None
:raises: None
"""
# if we are recursing because we sent a click event, and it got propagated to the parents
# and we recieve it again, terminate
if self.__recursing:
return # depends on [control=['if'], data=[]]
# find index at mouse position
i = self.index_at_event(event)
# if the index is not valid, we don't care
# handle it the default way
if not i.isValid():
return getattr(super(WidgetDelegateViewMixin, self), eventhandler)(event) # depends on [control=['if'], data=[]]
# get the widget delegate. if there is None, handle it the default way
delegate = self.itemDelegate(i)
if not isinstance(delegate, WidgetDelegate):
return getattr(super(WidgetDelegateViewMixin, self), eventhandler)(event) # depends on [control=['if'], data=[]]
# see if there is already a editor
widget = delegate.edit_widget(i)
if not widget:
# close all editors, then start editing
delegate.close_editors()
# Force editing. If in editing state, view will refuse editing.
if self.state() == self.EditingState:
self.setState(self.NoState) # depends on [control=['if'], data=[]]
self.edit(i)
# get the editor widget. if there is None, there is nothing to do so return
widget = delegate.edit_widget(i) # depends on [control=['if'], data=[]]
if not widget:
return getattr(super(WidgetDelegateViewMixin, self), eventhandler)(event) # depends on [control=['if'], data=[]]
# try to find the relative position to the widget
pid = self.get_pos_in_delegate(i, event.globalPos())
widgetatpos = widget.childAt(pid)
if widgetatpos:
widgettoclick = widgetatpos
g = widget.mapToGlobal(pid)
clickpos = widgettoclick.mapFromGlobal(g) # depends on [control=['if'], data=[]]
else:
widgettoclick = widget
clickpos = pid
# create a new event for the editor widget.
e = QtGui.QMouseEvent(event.type(), clickpos, event.button(), event.buttons(), event.modifiers())
# before we send, make sure, we cannot recurse
self.__recursing = True
try:
r = QtGui.QApplication.sendEvent(widgettoclick, e) # depends on [control=['try'], data=[]]
finally:
self.__recursing = False # out of the recursion. now we can accept click events again
return r |
def load_configuration_file(self):
"""
Load all configuration from file
"""
if not os.path.exists(self.config_file):
return
try:
with open(self.config_file, 'r') as file:
csvreader = csv.reader(file, delimiter='=',
escapechar='\\', quoting=csv.QUOTE_NONE)
for line in csvreader:
if len(line) == 2:
key, value = line
self.config_dict[key] = value
else:
self.config_dict = dict()
self.logger.warning("Malformed configuration file {0}, ignoring it.".
format(self.config_file))
return
except (OSError, IOError) as e:
self.logger.warning("Could not load configuration file: {0}".\
format(utils.exc_as_decoded_string(e))) | def function[load_configuration_file, parameter[self]]:
constant[
Load all configuration from file
]
if <ast.UnaryOp object at 0x7da1b0fafa90> begin[:]
return[None]
<ast.Try object at 0x7da1b0faffa0> | keyword[def] identifier[load_configuration_file] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[config_file] ):
keyword[return]
keyword[try] :
keyword[with] identifier[open] ( identifier[self] . identifier[config_file] , literal[string] ) keyword[as] identifier[file] :
identifier[csvreader] = identifier[csv] . identifier[reader] ( identifier[file] , identifier[delimiter] = literal[string] ,
identifier[escapechar] = literal[string] , identifier[quoting] = identifier[csv] . identifier[QUOTE_NONE] )
keyword[for] identifier[line] keyword[in] identifier[csvreader] :
keyword[if] identifier[len] ( identifier[line] )== literal[int] :
identifier[key] , identifier[value] = identifier[line]
identifier[self] . identifier[config_dict] [ identifier[key] ]= identifier[value]
keyword[else] :
identifier[self] . identifier[config_dict] = identifier[dict] ()
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] .
identifier[format] ( identifier[self] . identifier[config_file] ))
keyword[return]
keyword[except] ( identifier[OSError] , identifier[IOError] ) keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[utils] . identifier[exc_as_decoded_string] ( identifier[e] ))) | def load_configuration_file(self):
"""
Load all configuration from file
"""
if not os.path.exists(self.config_file):
return # depends on [control=['if'], data=[]]
try:
with open(self.config_file, 'r') as file:
csvreader = csv.reader(file, delimiter='=', escapechar='\\', quoting=csv.QUOTE_NONE)
for line in csvreader:
if len(line) == 2:
(key, value) = line
self.config_dict[key] = value # depends on [control=['if'], data=[]]
else:
self.config_dict = dict()
self.logger.warning('Malformed configuration file {0}, ignoring it.'.format(self.config_file))
return # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['file']] # depends on [control=['try'], data=[]]
except (OSError, IOError) as e:
self.logger.warning('Could not load configuration file: {0}'.format(utils.exc_as_decoded_string(e))) # depends on [control=['except'], data=['e']] |
def _parse_logo(self, parsed_content):
"""
Parses the guild logo and saves it to the instance.
Parameters
----------
parsed_content: :class:`bs4.Tag`
The parsed content of the page.
Returns
-------
:class:`bool`
Whether the logo was found or not.
"""
logo_img = parsed_content.find('img', {'height': '64'})
if logo_img is None:
return False
self.logo_url = logo_img["src"]
return True | def function[_parse_logo, parameter[self, parsed_content]]:
constant[
Parses the guild logo and saves it to the instance.
Parameters
----------
parsed_content: :class:`bs4.Tag`
The parsed content of the page.
Returns
-------
:class:`bool`
Whether the logo was found or not.
]
variable[logo_img] assign[=] call[name[parsed_content].find, parameter[constant[img], dictionary[[<ast.Constant object at 0x7da20c795f90>], [<ast.Constant object at 0x7da20c794700>]]]]
if compare[name[logo_img] is constant[None]] begin[:]
return[constant[False]]
name[self].logo_url assign[=] call[name[logo_img]][constant[src]]
return[constant[True]] | keyword[def] identifier[_parse_logo] ( identifier[self] , identifier[parsed_content] ):
literal[string]
identifier[logo_img] = identifier[parsed_content] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })
keyword[if] identifier[logo_img] keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[self] . identifier[logo_url] = identifier[logo_img] [ literal[string] ]
keyword[return] keyword[True] | def _parse_logo(self, parsed_content):
"""
Parses the guild logo and saves it to the instance.
Parameters
----------
parsed_content: :class:`bs4.Tag`
The parsed content of the page.
Returns
-------
:class:`bool`
Whether the logo was found or not.
"""
logo_img = parsed_content.find('img', {'height': '64'})
if logo_img is None:
return False # depends on [control=['if'], data=[]]
self.logo_url = logo_img['src']
return True |
def _days_in_month(date):
"""The number of days in the month of the given date"""
if date.month == 12:
reference = type(date)(date.year + 1, 1, 1)
else:
reference = type(date)(date.year, date.month + 1, 1)
return (reference - timedelta(days=1)).day | def function[_days_in_month, parameter[date]]:
constant[The number of days in the month of the given date]
if compare[name[date].month equal[==] constant[12]] begin[:]
variable[reference] assign[=] call[call[name[type], parameter[name[date]]], parameter[binary_operation[name[date].year + constant[1]], constant[1], constant[1]]]
return[binary_operation[name[reference] - call[name[timedelta], parameter[]]].day] | keyword[def] identifier[_days_in_month] ( identifier[date] ):
literal[string]
keyword[if] identifier[date] . identifier[month] == literal[int] :
identifier[reference] = identifier[type] ( identifier[date] )( identifier[date] . identifier[year] + literal[int] , literal[int] , literal[int] )
keyword[else] :
identifier[reference] = identifier[type] ( identifier[date] )( identifier[date] . identifier[year] , identifier[date] . identifier[month] + literal[int] , literal[int] )
keyword[return] ( identifier[reference] - identifier[timedelta] ( identifier[days] = literal[int] )). identifier[day] | def _days_in_month(date):
"""The number of days in the month of the given date"""
if date.month == 12:
reference = type(date)(date.year + 1, 1, 1) # depends on [control=['if'], data=[]]
else:
reference = type(date)(date.year, date.month + 1, 1)
return (reference - timedelta(days=1)).day |
def save_task_request(self, idents, msg):
"""Save the submission of a task."""
client_id = idents[0]
try:
msg = self.session.unserialize(msg)
except Exception:
self.log.error("task::client %r sent invalid task message: %r",
client_id, msg, exc_info=True)
return
record = init_record(msg)
record['client_uuid'] = client_id.decode('ascii')
record['queue'] = 'task'
header = msg['header']
msg_id = header['msg_id']
self.pending.add(msg_id)
self.unassigned.add(msg_id)
try:
# it's posible iopub arrived first:
existing = self.db.get_record(msg_id)
if existing['resubmitted']:
for key in ('submitted', 'client_uuid', 'buffers'):
# don't clobber these keys on resubmit
# submitted and client_uuid should be different
# and buffers might be big, and shouldn't have changed
record.pop(key)
# still check content,header which should not change
# but are not expensive to compare as buffers
for key,evalue in existing.iteritems():
if key.endswith('buffers'):
# don't compare buffers
continue
rvalue = record.get(key, None)
if evalue and rvalue and evalue != rvalue:
self.log.warn("conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue)
elif evalue and not rvalue:
record[key] = evalue
try:
self.db.update_record(msg_id, record)
except Exception:
self.log.error("DB Error updating record %r", msg_id, exc_info=True)
except KeyError:
try:
self.db.add_record(msg_id, record)
except Exception:
self.log.error("DB Error adding record %r", msg_id, exc_info=True)
except Exception:
self.log.error("DB Error saving task request %r", msg_id, exc_info=True) | def function[save_task_request, parameter[self, idents, msg]]:
constant[Save the submission of a task.]
variable[client_id] assign[=] call[name[idents]][constant[0]]
<ast.Try object at 0x7da207f9a440>
variable[record] assign[=] call[name[init_record], parameter[name[msg]]]
call[name[record]][constant[client_uuid]] assign[=] call[name[client_id].decode, parameter[constant[ascii]]]
call[name[record]][constant[queue]] assign[=] constant[task]
variable[header] assign[=] call[name[msg]][constant[header]]
variable[msg_id] assign[=] call[name[header]][constant[msg_id]]
call[name[self].pending.add, parameter[name[msg_id]]]
call[name[self].unassigned.add, parameter[name[msg_id]]]
<ast.Try object at 0x7da207f98640> | keyword[def] identifier[save_task_request] ( identifier[self] , identifier[idents] , identifier[msg] ):
literal[string]
identifier[client_id] = identifier[idents] [ literal[int] ]
keyword[try] :
identifier[msg] = identifier[self] . identifier[session] . identifier[unserialize] ( identifier[msg] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[log] . identifier[error] ( literal[string] ,
identifier[client_id] , identifier[msg] , identifier[exc_info] = keyword[True] )
keyword[return]
identifier[record] = identifier[init_record] ( identifier[msg] )
identifier[record] [ literal[string] ]= identifier[client_id] . identifier[decode] ( literal[string] )
identifier[record] [ literal[string] ]= literal[string]
identifier[header] = identifier[msg] [ literal[string] ]
identifier[msg_id] = identifier[header] [ literal[string] ]
identifier[self] . identifier[pending] . identifier[add] ( identifier[msg_id] )
identifier[self] . identifier[unassigned] . identifier[add] ( identifier[msg_id] )
keyword[try] :
identifier[existing] = identifier[self] . identifier[db] . identifier[get_record] ( identifier[msg_id] )
keyword[if] identifier[existing] [ literal[string] ]:
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[record] . identifier[pop] ( identifier[key] )
keyword[for] identifier[key] , identifier[evalue] keyword[in] identifier[existing] . identifier[iteritems] ():
keyword[if] identifier[key] . identifier[endswith] ( literal[string] ):
keyword[continue]
identifier[rvalue] = identifier[record] . identifier[get] ( identifier[key] , keyword[None] )
keyword[if] identifier[evalue] keyword[and] identifier[rvalue] keyword[and] identifier[evalue] != identifier[rvalue] :
identifier[self] . identifier[log] . identifier[warn] ( literal[string] , identifier[msg_id] , identifier[rvalue] , identifier[key] , identifier[evalue] )
keyword[elif] identifier[evalue] keyword[and] keyword[not] identifier[rvalue] :
identifier[record] [ identifier[key] ]= identifier[evalue]
keyword[try] :
identifier[self] . identifier[db] . identifier[update_record] ( identifier[msg_id] , identifier[record] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[log] . identifier[error] ( literal[string] , identifier[msg_id] , identifier[exc_info] = keyword[True] )
keyword[except] identifier[KeyError] :
keyword[try] :
identifier[self] . identifier[db] . identifier[add_record] ( identifier[msg_id] , identifier[record] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[log] . identifier[error] ( literal[string] , identifier[msg_id] , identifier[exc_info] = keyword[True] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[log] . identifier[error] ( literal[string] , identifier[msg_id] , identifier[exc_info] = keyword[True] ) | def save_task_request(self, idents, msg):
"""Save the submission of a task."""
client_id = idents[0]
try:
msg = self.session.unserialize(msg) # depends on [control=['try'], data=[]]
except Exception:
self.log.error('task::client %r sent invalid task message: %r', client_id, msg, exc_info=True)
return # depends on [control=['except'], data=[]]
record = init_record(msg)
record['client_uuid'] = client_id.decode('ascii')
record['queue'] = 'task'
header = msg['header']
msg_id = header['msg_id']
self.pending.add(msg_id)
self.unassigned.add(msg_id)
try:
# it's posible iopub arrived first:
existing = self.db.get_record(msg_id)
if existing['resubmitted']:
for key in ('submitted', 'client_uuid', 'buffers'):
# don't clobber these keys on resubmit
# submitted and client_uuid should be different
# and buffers might be big, and shouldn't have changed
record.pop(key) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
# still check content,header which should not change
# but are not expensive to compare as buffers
for (key, evalue) in existing.iteritems():
if key.endswith('buffers'):
# don't compare buffers
continue # depends on [control=['if'], data=[]]
rvalue = record.get(key, None)
if evalue and rvalue and (evalue != rvalue):
self.log.warn('conflicting initial state for record: %r:%r <%r> %r', msg_id, rvalue, key, evalue) # depends on [control=['if'], data=[]]
elif evalue and (not rvalue):
record[key] = evalue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
try:
self.db.update_record(msg_id, record) # depends on [control=['try'], data=[]]
except Exception:
self.log.error('DB Error updating record %r', msg_id, exc_info=True) # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
try:
self.db.add_record(msg_id, record) # depends on [control=['try'], data=[]]
except Exception:
self.log.error('DB Error adding record %r', msg_id, exc_info=True) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
except Exception:
self.log.error('DB Error saving task request %r', msg_id, exc_info=True) # depends on [control=['except'], data=[]] |
def get_lu_from_synset(self, syn_id, lemma = None):
"""Returns (lu_id, synonyms=[(word, lu_id)] ) tuple given a synset ID and a lemma"""
if not lemma:
return self.get_lus_from_synset(syn_id) #alias
if not isinstance(lemma,unicode):
lemma = unicode(lemma,'utf-8')
root = self.get_synset_xml(syn_id)
elem_synonyms = root.find( ".//synonyms" )
lu_id = None
synonyms = []
for elem_synonym in elem_synonyms:
synonym_str = elem_synonym.get( "c_lu_id-previewtext" ) # get "c_lu_id-previewtext" attribute
# synonym_str ends with ":<num>"
synonym = synonym_str.split( ':' )[ 0 ].strip()
if synonym != lemma:
synonyms.append( (synonym, elem_synonym.get("c_lu_id")) )
if self.debug:
printf( "synonym add: %s" % synonym )
else:
lu_id = elem_synonym.get( "c_lu_id" ) # get "c_lu_id" attribute
if self.debug:
printf( "lu_id: %s" % lu_id )
printf( "synonym skip lemma: %s" % synonym )
return lu_id, synonyms | def function[get_lu_from_synset, parameter[self, syn_id, lemma]]:
constant[Returns (lu_id, synonyms=[(word, lu_id)] ) tuple given a synset ID and a lemma]
if <ast.UnaryOp object at 0x7da20c993970> begin[:]
return[call[name[self].get_lus_from_synset, parameter[name[syn_id]]]]
if <ast.UnaryOp object at 0x7da20c993400> begin[:]
variable[lemma] assign[=] call[name[unicode], parameter[name[lemma], constant[utf-8]]]
variable[root] assign[=] call[name[self].get_synset_xml, parameter[name[syn_id]]]
variable[elem_synonyms] assign[=] call[name[root].find, parameter[constant[.//synonyms]]]
variable[lu_id] assign[=] constant[None]
variable[synonyms] assign[=] list[[]]
for taget[name[elem_synonym]] in starred[name[elem_synonyms]] begin[:]
variable[synonym_str] assign[=] call[name[elem_synonym].get, parameter[constant[c_lu_id-previewtext]]]
variable[synonym] assign[=] call[call[call[name[synonym_str].split, parameter[constant[:]]]][constant[0]].strip, parameter[]]
if compare[name[synonym] not_equal[!=] name[lemma]] begin[:]
call[name[synonyms].append, parameter[tuple[[<ast.Name object at 0x7da18f09da50>, <ast.Call object at 0x7da18f09f3d0>]]]]
if name[self].debug begin[:]
call[name[printf], parameter[binary_operation[constant[synonym add: %s] <ast.Mod object at 0x7da2590d6920> name[synonym]]]]
return[tuple[[<ast.Name object at 0x7da18f09e140>, <ast.Name object at 0x7da18f09e320>]]] | keyword[def] identifier[get_lu_from_synset] ( identifier[self] , identifier[syn_id] , identifier[lemma] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[lemma] :
keyword[return] identifier[self] . identifier[get_lus_from_synset] ( identifier[syn_id] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[lemma] , identifier[unicode] ):
identifier[lemma] = identifier[unicode] ( identifier[lemma] , literal[string] )
identifier[root] = identifier[self] . identifier[get_synset_xml] ( identifier[syn_id] )
identifier[elem_synonyms] = identifier[root] . identifier[find] ( literal[string] )
identifier[lu_id] = keyword[None]
identifier[synonyms] =[]
keyword[for] identifier[elem_synonym] keyword[in] identifier[elem_synonyms] :
identifier[synonym_str] = identifier[elem_synonym] . identifier[get] ( literal[string] )
identifier[synonym] = identifier[synonym_str] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[if] identifier[synonym] != identifier[lemma] :
identifier[synonyms] . identifier[append] (( identifier[synonym] , identifier[elem_synonym] . identifier[get] ( literal[string] )))
keyword[if] identifier[self] . identifier[debug] :
identifier[printf] ( literal[string] % identifier[synonym] )
keyword[else] :
identifier[lu_id] = identifier[elem_synonym] . identifier[get] ( literal[string] )
keyword[if] identifier[self] . identifier[debug] :
identifier[printf] ( literal[string] % identifier[lu_id] )
identifier[printf] ( literal[string] % identifier[synonym] )
keyword[return] identifier[lu_id] , identifier[synonyms] | def get_lu_from_synset(self, syn_id, lemma=None):
"""Returns (lu_id, synonyms=[(word, lu_id)] ) tuple given a synset ID and a lemma"""
if not lemma:
return self.get_lus_from_synset(syn_id) #alias # depends on [control=['if'], data=[]]
if not isinstance(lemma, unicode):
lemma = unicode(lemma, 'utf-8') # depends on [control=['if'], data=[]]
root = self.get_synset_xml(syn_id)
elem_synonyms = root.find('.//synonyms')
lu_id = None
synonyms = []
for elem_synonym in elem_synonyms:
synonym_str = elem_synonym.get('c_lu_id-previewtext') # get "c_lu_id-previewtext" attribute
# synonym_str ends with ":<num>"
synonym = synonym_str.split(':')[0].strip()
if synonym != lemma:
synonyms.append((synonym, elem_synonym.get('c_lu_id')))
if self.debug:
printf('synonym add: %s' % synonym) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['synonym']]
else:
lu_id = elem_synonym.get('c_lu_id') # get "c_lu_id" attribute
if self.debug:
printf('lu_id: %s' % lu_id)
printf('synonym skip lemma: %s' % synonym) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['elem_synonym']]
return (lu_id, synonyms) |
def system(cmd, data=None):
'''
pipes the output of a program
'''
import subprocess
s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
out, err = s.communicate(data)
return out.decode('utf8') | def function[system, parameter[cmd, data]]:
constant[
pipes the output of a program
]
import module[subprocess]
variable[s] assign[=] call[name[subprocess].Popen, parameter[name[cmd]]]
<ast.Tuple object at 0x7da18dc98160> assign[=] call[name[s].communicate, parameter[name[data]]]
return[call[name[out].decode, parameter[constant[utf8]]]] | keyword[def] identifier[system] ( identifier[cmd] , identifier[data] = keyword[None] ):
literal[string]
keyword[import] identifier[subprocess]
identifier[s] = identifier[subprocess] . identifier[Popen] ( identifier[cmd] , identifier[shell] = keyword[True] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stdin] = identifier[subprocess] . identifier[PIPE] )
identifier[out] , identifier[err] = identifier[s] . identifier[communicate] ( identifier[data] )
keyword[return] identifier[out] . identifier[decode] ( literal[string] ) | def system(cmd, data=None):
"""
pipes the output of a program
"""
import subprocess
s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
(out, err) = s.communicate(data)
return out.decode('utf8') |
def _ensureinmemory(self):
"""Ensure the data is held in memory, not in a file."""
self._setbytes_unsafe(self._datastore.getbyteslice(0, self._datastore.bytelength),
self.len, self._offset) | def function[_ensureinmemory, parameter[self]]:
constant[Ensure the data is held in memory, not in a file.]
call[name[self]._setbytes_unsafe, parameter[call[name[self]._datastore.getbyteslice, parameter[constant[0], name[self]._datastore.bytelength]], name[self].len, name[self]._offset]] | keyword[def] identifier[_ensureinmemory] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_setbytes_unsafe] ( identifier[self] . identifier[_datastore] . identifier[getbyteslice] ( literal[int] , identifier[self] . identifier[_datastore] . identifier[bytelength] ),
identifier[self] . identifier[len] , identifier[self] . identifier[_offset] ) | def _ensureinmemory(self):
"""Ensure the data is held in memory, not in a file."""
self._setbytes_unsafe(self._datastore.getbyteslice(0, self._datastore.bytelength), self.len, self._offset) |
def init (domain, directory, loc=None):
"""Initialize this gettext i18n module. Searches for supported languages
and installs the gettext translator class."""
global default_language, default_encoding, default_domain, default_directory
default_directory = directory
default_domain = domain
if os.path.isdir(directory):
# get supported languages
for lang in os.listdir(directory):
path = os.path.join(directory, lang, 'LC_MESSAGES')
mo_file = os.path.join(path, '%s.mo' % domain)
if os.path.exists(mo_file):
supported_languages.add(lang)
if loc is None:
loc, encoding = get_locale()
else:
encoding = get_locale()[1]
if loc in supported_languages:
default_language = loc
else:
default_language = "en"
# Even if the default language is not supported, the encoding should
# be installed. Otherwise the Python installation is borked.
default_encoding = encoding
install_language(default_language) | def function[init, parameter[domain, directory, loc]]:
constant[Initialize this gettext i18n module. Searches for supported languages
and installs the gettext translator class.]
<ast.Global object at 0x7da1b0805b70>
variable[default_directory] assign[=] name[directory]
variable[default_domain] assign[=] name[domain]
if call[name[os].path.isdir, parameter[name[directory]]] begin[:]
for taget[name[lang]] in starred[call[name[os].listdir, parameter[name[directory]]]] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[directory], name[lang], constant[LC_MESSAGES]]]
variable[mo_file] assign[=] call[name[os].path.join, parameter[name[path], binary_operation[constant[%s.mo] <ast.Mod object at 0x7da2590d6920> name[domain]]]]
if call[name[os].path.exists, parameter[name[mo_file]]] begin[:]
call[name[supported_languages].add, parameter[name[lang]]]
if compare[name[loc] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b23462c0> assign[=] call[name[get_locale], parameter[]]
if compare[name[loc] in name[supported_languages]] begin[:]
variable[default_language] assign[=] name[loc]
variable[default_encoding] assign[=] name[encoding]
call[name[install_language], parameter[name[default_language]]] | keyword[def] identifier[init] ( identifier[domain] , identifier[directory] , identifier[loc] = keyword[None] ):
literal[string]
keyword[global] identifier[default_language] , identifier[default_encoding] , identifier[default_domain] , identifier[default_directory]
identifier[default_directory] = identifier[directory]
identifier[default_domain] = identifier[domain]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[directory] ):
keyword[for] identifier[lang] keyword[in] identifier[os] . identifier[listdir] ( identifier[directory] ):
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[lang] , literal[string] )
identifier[mo_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] % identifier[domain] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[mo_file] ):
identifier[supported_languages] . identifier[add] ( identifier[lang] )
keyword[if] identifier[loc] keyword[is] keyword[None] :
identifier[loc] , identifier[encoding] = identifier[get_locale] ()
keyword[else] :
identifier[encoding] = identifier[get_locale] ()[ literal[int] ]
keyword[if] identifier[loc] keyword[in] identifier[supported_languages] :
identifier[default_language] = identifier[loc]
keyword[else] :
identifier[default_language] = literal[string]
identifier[default_encoding] = identifier[encoding]
identifier[install_language] ( identifier[default_language] ) | def init(domain, directory, loc=None):
"""Initialize this gettext i18n module. Searches for supported languages
and installs the gettext translator class."""
global default_language, default_encoding, default_domain, default_directory
default_directory = directory
default_domain = domain
if os.path.isdir(directory):
# get supported languages
for lang in os.listdir(directory):
path = os.path.join(directory, lang, 'LC_MESSAGES')
mo_file = os.path.join(path, '%s.mo' % domain)
if os.path.exists(mo_file):
supported_languages.add(lang) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['lang']] # depends on [control=['if'], data=[]]
if loc is None:
(loc, encoding) = get_locale() # depends on [control=['if'], data=['loc']]
else:
encoding = get_locale()[1]
if loc in supported_languages:
default_language = loc # depends on [control=['if'], data=['loc']]
else:
default_language = 'en'
# Even if the default language is not supported, the encoding should
# be installed. Otherwise the Python installation is borked.
default_encoding = encoding
install_language(default_language) |
def parse_image_spec(spec):
""" Parses out a Publ-Markdown image spec into a tuple of path, args, title """
# I was having trouble coming up with a single RE that did it right,
# so let's just break it down into sub-problems. First, parse out the
# alt text...
match = re.match(r'(.+)\s+\"(.*)\"\s*$', spec)
if match:
spec, title = match.group(1, 2)
else:
title = None
# and now parse out the arglist
match = re.match(r'([^\{]*)(\{(.*)\})\s*$', spec)
if match:
spec = match.group(1)
args = parse_arglist(match.group(3))
else:
args = {}
return spec, args, (title and html.unescape(title)) | def function[parse_image_spec, parameter[spec]]:
constant[ Parses out a Publ-Markdown image spec into a tuple of path, args, title ]
variable[match] assign[=] call[name[re].match, parameter[constant[(.+)\s+\"(.*)\"\s*$], name[spec]]]
if name[match] begin[:]
<ast.Tuple object at 0x7da20c6c52d0> assign[=] call[name[match].group, parameter[constant[1], constant[2]]]
variable[match] assign[=] call[name[re].match, parameter[constant[([^\{]*)(\{(.*)\})\s*$], name[spec]]]
if name[match] begin[:]
variable[spec] assign[=] call[name[match].group, parameter[constant[1]]]
variable[args] assign[=] call[name[parse_arglist], parameter[call[name[match].group, parameter[constant[3]]]]]
return[tuple[[<ast.Name object at 0x7da20c6c6560>, <ast.Name object at 0x7da20c6c75b0>, <ast.BoolOp object at 0x7da20c6c7e20>]]] | keyword[def] identifier[parse_image_spec] ( identifier[spec] ):
literal[string]
identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[spec] )
keyword[if] identifier[match] :
identifier[spec] , identifier[title] = identifier[match] . identifier[group] ( literal[int] , literal[int] )
keyword[else] :
identifier[title] = keyword[None]
identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[spec] )
keyword[if] identifier[match] :
identifier[spec] = identifier[match] . identifier[group] ( literal[int] )
identifier[args] = identifier[parse_arglist] ( identifier[match] . identifier[group] ( literal[int] ))
keyword[else] :
identifier[args] ={}
keyword[return] identifier[spec] , identifier[args] ,( identifier[title] keyword[and] identifier[html] . identifier[unescape] ( identifier[title] )) | def parse_image_spec(spec):
""" Parses out a Publ-Markdown image spec into a tuple of path, args, title """
# I was having trouble coming up with a single RE that did it right,
# so let's just break it down into sub-problems. First, parse out the
# alt text...
match = re.match('(.+)\\s+\\"(.*)\\"\\s*$', spec)
if match:
(spec, title) = match.group(1, 2) # depends on [control=['if'], data=[]]
else:
title = None
# and now parse out the arglist
match = re.match('([^\\{]*)(\\{(.*)\\})\\s*$', spec)
if match:
spec = match.group(1)
args = parse_arglist(match.group(3)) # depends on [control=['if'], data=[]]
else:
args = {}
return (spec, args, title and html.unescape(title)) |
def request(self, endpoint, method='GET', headers=None, params=None, data=None):
'''
Send a request to the given Wunderlist API endpoint
Params:
endpoint -- API endpoint to send request to
Keyword Args:
headers -- headers to add to the request
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request
'''
if not headers:
headers = {}
if method in ['POST', 'PATCH', 'PUT']:
headers['Content-Type'] = 'application/json'
url = '/'.join([self.api_url, 'v' + self.api_version, endpoint])
data = json.dumps(data) if data else None
try:
response = requests.request(method=method, url=url, params=params, headers=headers, data=data)
# TODO Does recreating the exception classes 'requests' use suck? Yes, but it sucks more to expose the underlying library I use
except requests.exceptions.Timeout as e:
raise wp_exceptions.TimeoutError(e)
except requests.exceptions.ConnectionError as e:
raise wp_exceptions.ConnectionError(e)
self._validate_response(method, response)
return response | def function[request, parameter[self, endpoint, method, headers, params, data]]:
constant[
Send a request to the given Wunderlist API endpoint
Params:
endpoint -- API endpoint to send request to
Keyword Args:
headers -- headers to add to the request
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request
]
if <ast.UnaryOp object at 0x7da20e9619c0> begin[:]
variable[headers] assign[=] dictionary[[], []]
if compare[name[method] in list[[<ast.Constant object at 0x7da20e962110>, <ast.Constant object at 0x7da20e962320>, <ast.Constant object at 0x7da20e961a20>]]] begin[:]
call[name[headers]][constant[Content-Type]] assign[=] constant[application/json]
variable[url] assign[=] call[constant[/].join, parameter[list[[<ast.Attribute object at 0x7da20e961270>, <ast.BinOp object at 0x7da20e961600>, <ast.Name object at 0x7da20e962230>]]]]
variable[data] assign[=] <ast.IfExp object at 0x7da20e963b50>
<ast.Try object at 0x7da20e9612a0>
call[name[self]._validate_response, parameter[name[method], name[response]]]
return[name[response]] | keyword[def] identifier[request] ( identifier[self] , identifier[endpoint] , identifier[method] = literal[string] , identifier[headers] = keyword[None] , identifier[params] = keyword[None] , identifier[data] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[headers] :
identifier[headers] ={}
keyword[if] identifier[method] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[headers] [ literal[string] ]= literal[string]
identifier[url] = literal[string] . identifier[join] ([ identifier[self] . identifier[api_url] , literal[string] + identifier[self] . identifier[api_version] , identifier[endpoint] ])
identifier[data] = identifier[json] . identifier[dumps] ( identifier[data] ) keyword[if] identifier[data] keyword[else] keyword[None]
keyword[try] :
identifier[response] = identifier[requests] . identifier[request] ( identifier[method] = identifier[method] , identifier[url] = identifier[url] , identifier[params] = identifier[params] , identifier[headers] = identifier[headers] , identifier[data] = identifier[data] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[Timeout] keyword[as] identifier[e] :
keyword[raise] identifier[wp_exceptions] . identifier[TimeoutError] ( identifier[e] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] keyword[as] identifier[e] :
keyword[raise] identifier[wp_exceptions] . identifier[ConnectionError] ( identifier[e] )
identifier[self] . identifier[_validate_response] ( identifier[method] , identifier[response] )
keyword[return] identifier[response] | def request(self, endpoint, method='GET', headers=None, params=None, data=None):
"""
Send a request to the given Wunderlist API endpoint
Params:
endpoint -- API endpoint to send request to
Keyword Args:
headers -- headers to add to the request
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request
"""
if not headers:
headers = {} # depends on [control=['if'], data=[]]
if method in ['POST', 'PATCH', 'PUT']:
headers['Content-Type'] = 'application/json' # depends on [control=['if'], data=[]]
url = '/'.join([self.api_url, 'v' + self.api_version, endpoint])
data = json.dumps(data) if data else None
try:
response = requests.request(method=method, url=url, params=params, headers=headers, data=data) # depends on [control=['try'], data=[]]
# TODO Does recreating the exception classes 'requests' use suck? Yes, but it sucks more to expose the underlying library I use
except requests.exceptions.Timeout as e:
raise wp_exceptions.TimeoutError(e) # depends on [control=['except'], data=['e']]
except requests.exceptions.ConnectionError as e:
raise wp_exceptions.ConnectionError(e) # depends on [control=['except'], data=['e']]
self._validate_response(method, response)
return response |
def modify_calendar_resource(self, calres, attrs):
"""
:param calres: a zobjects.CalendarResource
:param attrs: a dictionary of attributes to set ({key:value,...})
"""
attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
self.request('ModifyCalendarResource', {
'id': self._get_or_fetch_id(
calres, self.get_calendar_resource),
'a': attrs
}) | def function[modify_calendar_resource, parameter[self, calres, attrs]]:
constant[
:param calres: a zobjects.CalendarResource
:param attrs: a dictionary of attributes to set ({key:value,...})
]
variable[attrs] assign[=] <ast.ListComp object at 0x7da18dc04e80>
call[name[self].request, parameter[constant[ModifyCalendarResource], dictionary[[<ast.Constant object at 0x7da18dc053c0>, <ast.Constant object at 0x7da18dc04130>], [<ast.Call object at 0x7da18dc05ea0>, <ast.Name object at 0x7da18dc07f10>]]]] | keyword[def] identifier[modify_calendar_resource] ( identifier[self] , identifier[calres] , identifier[attrs] ):
literal[string]
identifier[attrs] =[{ literal[string] : identifier[k] , literal[string] : identifier[v] } keyword[for] identifier[k] , identifier[v] keyword[in] identifier[attrs] . identifier[items] ()]
identifier[self] . identifier[request] ( literal[string] ,{
literal[string] : identifier[self] . identifier[_get_or_fetch_id] (
identifier[calres] , identifier[self] . identifier[get_calendar_resource] ),
literal[string] : identifier[attrs]
}) | def modify_calendar_resource(self, calres, attrs):
"""
:param calres: a zobjects.CalendarResource
:param attrs: a dictionary of attributes to set ({key:value,...})
"""
attrs = [{'n': k, '_content': v} for (k, v) in attrs.items()]
self.request('ModifyCalendarResource', {'id': self._get_or_fetch_id(calres, self.get_calendar_resource), 'a': attrs}) |
def get_redirect_args(self, request, callback):
"Get request parameters for redirect url."
callback = force_text(request.build_absolute_uri(callback))
raw_token = self.get_request_token(request, callback)
token, secret = self.parse_raw_token(raw_token)
if token is not None and secret is not None:
request.session[self.session_key] = raw_token
return {
'oauth_token': token,
'oauth_callback': callback,
} | def function[get_redirect_args, parameter[self, request, callback]]:
constant[Get request parameters for redirect url.]
variable[callback] assign[=] call[name[force_text], parameter[call[name[request].build_absolute_uri, parameter[name[callback]]]]]
variable[raw_token] assign[=] call[name[self].get_request_token, parameter[name[request], name[callback]]]
<ast.Tuple object at 0x7da1b254d630> assign[=] call[name[self].parse_raw_token, parameter[name[raw_token]]]
if <ast.BoolOp object at 0x7da1b254f5e0> begin[:]
call[name[request].session][name[self].session_key] assign[=] name[raw_token]
return[dictionary[[<ast.Constant object at 0x7da1b254e260>, <ast.Constant object at 0x7da1b264ad70>], [<ast.Name object at 0x7da1b26494e0>, <ast.Name object at 0x7da1b26487f0>]]] | keyword[def] identifier[get_redirect_args] ( identifier[self] , identifier[request] , identifier[callback] ):
literal[string]
identifier[callback] = identifier[force_text] ( identifier[request] . identifier[build_absolute_uri] ( identifier[callback] ))
identifier[raw_token] = identifier[self] . identifier[get_request_token] ( identifier[request] , identifier[callback] )
identifier[token] , identifier[secret] = identifier[self] . identifier[parse_raw_token] ( identifier[raw_token] )
keyword[if] identifier[token] keyword[is] keyword[not] keyword[None] keyword[and] identifier[secret] keyword[is] keyword[not] keyword[None] :
identifier[request] . identifier[session] [ identifier[self] . identifier[session_key] ]= identifier[raw_token]
keyword[return] {
literal[string] : identifier[token] ,
literal[string] : identifier[callback] ,
} | def get_redirect_args(self, request, callback):
"""Get request parameters for redirect url."""
callback = force_text(request.build_absolute_uri(callback))
raw_token = self.get_request_token(request, callback)
(token, secret) = self.parse_raw_token(raw_token)
if token is not None and secret is not None:
request.session[self.session_key] = raw_token # depends on [control=['if'], data=[]]
return {'oauth_token': token, 'oauth_callback': callback} |
def parse_output(output, keep=("INCLUDE", "LIB", "LIBPATH", "PATH")):
"""
Parse output from running visual c++/studios vcvarsall.bat and running set
To capture the values listed in keep
"""
# dkeep is a dict associating key: path_list, where key is one item from
# keep, and pat_list the associated list of paths
dkeep = dict([(i, []) for i in keep])
# rdk will keep the regex to match the .bat file output line starts
rdk = {}
for i in keep:
rdk[i] = re.compile('%s=(.*)' % i, re.I)
def add_env(rmatch, key, dkeep=dkeep):
path_list = rmatch.group(1).split(os.pathsep)
for path in path_list:
# Do not add empty paths (when a var ends with ;)
if path:
# XXX: For some reason, VC98 .bat file adds "" around the PATH
# values, and it screws up the environment later, so we strip
# it.
path = path.strip('"')
dkeep[key].append(str(path))
for line in output.splitlines():
for k, value in rdk.items():
match = value.match(line)
if match:
add_env(match, k)
return dkeep | def function[parse_output, parameter[output, keep]]:
constant[
Parse output from running visual c++/studios vcvarsall.bat and running set
To capture the values listed in keep
]
variable[dkeep] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da18f58c760>]]
variable[rdk] assign[=] dictionary[[], []]
for taget[name[i]] in starred[name[keep]] begin[:]
call[name[rdk]][name[i]] assign[=] call[name[re].compile, parameter[binary_operation[constant[%s=(.*)] <ast.Mod object at 0x7da2590d6920> name[i]], name[re].I]]
def function[add_env, parameter[rmatch, key, dkeep]]:
variable[path_list] assign[=] call[call[name[rmatch].group, parameter[constant[1]]].split, parameter[name[os].pathsep]]
for taget[name[path]] in starred[name[path_list]] begin[:]
if name[path] begin[:]
variable[path] assign[=] call[name[path].strip, parameter[constant["]]]
call[call[name[dkeep]][name[key]].append, parameter[call[name[str], parameter[name[path]]]]]
for taget[name[line]] in starred[call[name[output].splitlines, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f58d0c0>, <ast.Name object at 0x7da18f58f790>]]] in starred[call[name[rdk].items, parameter[]]] begin[:]
variable[match] assign[=] call[name[value].match, parameter[name[line]]]
if name[match] begin[:]
call[name[add_env], parameter[name[match], name[k]]]
return[name[dkeep]] | keyword[def] identifier[parse_output] ( identifier[output] , identifier[keep] =( literal[string] , literal[string] , literal[string] , literal[string] )):
literal[string]
identifier[dkeep] = identifier[dict] ([( identifier[i] ,[]) keyword[for] identifier[i] keyword[in] identifier[keep] ])
identifier[rdk] ={}
keyword[for] identifier[i] keyword[in] identifier[keep] :
identifier[rdk] [ identifier[i] ]= identifier[re] . identifier[compile] ( literal[string] % identifier[i] , identifier[re] . identifier[I] )
keyword[def] identifier[add_env] ( identifier[rmatch] , identifier[key] , identifier[dkeep] = identifier[dkeep] ):
identifier[path_list] = identifier[rmatch] . identifier[group] ( literal[int] ). identifier[split] ( identifier[os] . identifier[pathsep] )
keyword[for] identifier[path] keyword[in] identifier[path_list] :
keyword[if] identifier[path] :
identifier[path] = identifier[path] . identifier[strip] ( literal[string] )
identifier[dkeep] [ identifier[key] ]. identifier[append] ( identifier[str] ( identifier[path] ))
keyword[for] identifier[line] keyword[in] identifier[output] . identifier[splitlines] ():
keyword[for] identifier[k] , identifier[value] keyword[in] identifier[rdk] . identifier[items] ():
identifier[match] = identifier[value] . identifier[match] ( identifier[line] )
keyword[if] identifier[match] :
identifier[add_env] ( identifier[match] , identifier[k] )
keyword[return] identifier[dkeep] | def parse_output(output, keep=('INCLUDE', 'LIB', 'LIBPATH', 'PATH')):
"""
Parse output from running visual c++/studios vcvarsall.bat and running set
To capture the values listed in keep
"""
# dkeep is a dict associating key: path_list, where key is one item from
# keep, and pat_list the associated list of paths
dkeep = dict([(i, []) for i in keep])
# rdk will keep the regex to match the .bat file output line starts
rdk = {}
for i in keep:
rdk[i] = re.compile('%s=(.*)' % i, re.I) # depends on [control=['for'], data=['i']]
def add_env(rmatch, key, dkeep=dkeep):
path_list = rmatch.group(1).split(os.pathsep)
for path in path_list:
# Do not add empty paths (when a var ends with ;)
if path:
# XXX: For some reason, VC98 .bat file adds "" around the PATH
# values, and it screws up the environment later, so we strip
# it.
path = path.strip('"')
dkeep[key].append(str(path)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
for line in output.splitlines():
for (k, value) in rdk.items():
match = value.match(line)
if match:
add_env(match, k) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['line']]
return dkeep |
def read_lock(self):
'''Find out who currently owns the namespace global lock.
This is purely a diagnostic tool. If you are trying to get
the global lock, it is better to just call :meth:`lock`, which
will atomically get the lock if possible and retry.
:return: session identifier of the lock holder, or :const:`None`
'''
return redis.Redis(connection_pool=self.pool).get(self._lock_name) | def function[read_lock, parameter[self]]:
constant[Find out who currently owns the namespace global lock.
This is purely a diagnostic tool. If you are trying to get
the global lock, it is better to just call :meth:`lock`, which
will atomically get the lock if possible and retry.
:return: session identifier of the lock holder, or :const:`None`
]
return[call[call[name[redis].Redis, parameter[]].get, parameter[name[self]._lock_name]]] | keyword[def] identifier[read_lock] ( identifier[self] ):
literal[string]
keyword[return] identifier[redis] . identifier[Redis] ( identifier[connection_pool] = identifier[self] . identifier[pool] ). identifier[get] ( identifier[self] . identifier[_lock_name] ) | def read_lock(self):
"""Find out who currently owns the namespace global lock.
This is purely a diagnostic tool. If you are trying to get
the global lock, it is better to just call :meth:`lock`, which
will atomically get the lock if possible and retry.
:return: session identifier of the lock holder, or :const:`None`
"""
return redis.Redis(connection_pool=self.pool).get(self._lock_name) |
def k_nearest_approx(self, vec, k):
"""Get the k nearest neighbors of a vector (in terms of cosine similarity).
:param (np.array) vec: query vector
:param (int) k: number of top neighbors to return
:return (list[tuple[str, float]]): a list of (word, cosine similarity) pairs, in descending order
"""
if not hasattr(self, 'lshf'):
self.lshf = self._init_lsh_forest()
# TODO(kelvin): make this inner product score, to be consistent with k_nearest
distances, neighbors = self.lshf.kneighbors([vec], n_neighbors=k, return_distance=True)
scores = np.subtract(1, distances)
nbr_score_pairs = self._word_to_score(np.squeeze(neighbors), np.squeeze(scores))
return sorted(nbr_score_pairs.items(), key=lambda x: x[1], reverse=True) | def function[k_nearest_approx, parameter[self, vec, k]]:
constant[Get the k nearest neighbors of a vector (in terms of cosine similarity).
:param (np.array) vec: query vector
:param (int) k: number of top neighbors to return
:return (list[tuple[str, float]]): a list of (word, cosine similarity) pairs, in descending order
]
if <ast.UnaryOp object at 0x7da1b101a500> begin[:]
name[self].lshf assign[=] call[name[self]._init_lsh_forest, parameter[]]
<ast.Tuple object at 0x7da1b101ac20> assign[=] call[name[self].lshf.kneighbors, parameter[list[[<ast.Name object at 0x7da1b101b3d0>]]]]
variable[scores] assign[=] call[name[np].subtract, parameter[constant[1], name[distances]]]
variable[nbr_score_pairs] assign[=] call[name[self]._word_to_score, parameter[call[name[np].squeeze, parameter[name[neighbors]]], call[name[np].squeeze, parameter[name[scores]]]]]
return[call[name[sorted], parameter[call[name[nbr_score_pairs].items, parameter[]]]]] | keyword[def] identifier[k_nearest_approx] ( identifier[self] , identifier[vec] , identifier[k] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[lshf] = identifier[self] . identifier[_init_lsh_forest] ()
identifier[distances] , identifier[neighbors] = identifier[self] . identifier[lshf] . identifier[kneighbors] ([ identifier[vec] ], identifier[n_neighbors] = identifier[k] , identifier[return_distance] = keyword[True] )
identifier[scores] = identifier[np] . identifier[subtract] ( literal[int] , identifier[distances] )
identifier[nbr_score_pairs] = identifier[self] . identifier[_word_to_score] ( identifier[np] . identifier[squeeze] ( identifier[neighbors] ), identifier[np] . identifier[squeeze] ( identifier[scores] ))
keyword[return] identifier[sorted] ( identifier[nbr_score_pairs] . identifier[items] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[reverse] = keyword[True] ) | def k_nearest_approx(self, vec, k):
"""Get the k nearest neighbors of a vector (in terms of cosine similarity).
:param (np.array) vec: query vector
:param (int) k: number of top neighbors to return
:return (list[tuple[str, float]]): a list of (word, cosine similarity) pairs, in descending order
"""
if not hasattr(self, 'lshf'):
self.lshf = self._init_lsh_forest() # depends on [control=['if'], data=[]]
# TODO(kelvin): make this inner product score, to be consistent with k_nearest
(distances, neighbors) = self.lshf.kneighbors([vec], n_neighbors=k, return_distance=True)
scores = np.subtract(1, distances)
nbr_score_pairs = self._word_to_score(np.squeeze(neighbors), np.squeeze(scores))
return sorted(nbr_score_pairs.items(), key=lambda x: x[1], reverse=True) |
def removeSegment(self, segment, preserveCurve=False):
"""
Remove segment from the contour.
If ``preserveCurve`` is set to ``True`` an attempt
will be made to preserve the shape of the curve
if the environment supports that functionality.
"""
if not isinstance(segment, int):
segment = self.segments.index(segment)
segment = normalizers.normalizeIndex(segment)
if segment >= self._len__segments():
raise ValueError("No segment located at index %d." % segment)
preserveCurve = normalizers.normalizeBoolean(preserveCurve)
self._removeSegment(segment, preserveCurve) | def function[removeSegment, parameter[self, segment, preserveCurve]]:
constant[
Remove segment from the contour.
If ``preserveCurve`` is set to ``True`` an attempt
will be made to preserve the shape of the curve
if the environment supports that functionality.
]
if <ast.UnaryOp object at 0x7da2041da650> begin[:]
variable[segment] assign[=] call[name[self].segments.index, parameter[name[segment]]]
variable[segment] assign[=] call[name[normalizers].normalizeIndex, parameter[name[segment]]]
if compare[name[segment] greater_or_equal[>=] call[name[self]._len__segments, parameter[]]] begin[:]
<ast.Raise object at 0x7da204963610>
variable[preserveCurve] assign[=] call[name[normalizers].normalizeBoolean, parameter[name[preserveCurve]]]
call[name[self]._removeSegment, parameter[name[segment], name[preserveCurve]]] | keyword[def] identifier[removeSegment] ( identifier[self] , identifier[segment] , identifier[preserveCurve] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[segment] , identifier[int] ):
identifier[segment] = identifier[self] . identifier[segments] . identifier[index] ( identifier[segment] )
identifier[segment] = identifier[normalizers] . identifier[normalizeIndex] ( identifier[segment] )
keyword[if] identifier[segment] >= identifier[self] . identifier[_len__segments] ():
keyword[raise] identifier[ValueError] ( literal[string] % identifier[segment] )
identifier[preserveCurve] = identifier[normalizers] . identifier[normalizeBoolean] ( identifier[preserveCurve] )
identifier[self] . identifier[_removeSegment] ( identifier[segment] , identifier[preserveCurve] ) | def removeSegment(self, segment, preserveCurve=False):
"""
Remove segment from the contour.
If ``preserveCurve`` is set to ``True`` an attempt
will be made to preserve the shape of the curve
if the environment supports that functionality.
"""
if not isinstance(segment, int):
segment = self.segments.index(segment) # depends on [control=['if'], data=[]]
segment = normalizers.normalizeIndex(segment)
if segment >= self._len__segments():
raise ValueError('No segment located at index %d.' % segment) # depends on [control=['if'], data=['segment']]
preserveCurve = normalizers.normalizeBoolean(preserveCurve)
self._removeSegment(segment, preserveCurve) |
def restart(self):
"""Restart this Docker container."""
yield from self.manager.query("POST", "containers/{}/restart".format(self._cid))
log.info("Docker container '{name}' [{image}] restarted".format(
name=self._name, image=self._image)) | def function[restart, parameter[self]]:
constant[Restart this Docker container.]
<ast.YieldFrom object at 0x7da18f722bc0>
call[name[log].info, parameter[call[constant[Docker container '{name}' [{image}] restarted].format, parameter[]]]] | keyword[def] identifier[restart] ( identifier[self] ):
literal[string]
keyword[yield] keyword[from] identifier[self] . identifier[manager] . identifier[query] ( literal[string] , literal[string] . identifier[format] ( identifier[self] . identifier[_cid] ))
identifier[log] . identifier[info] ( literal[string] . identifier[format] (
identifier[name] = identifier[self] . identifier[_name] , identifier[image] = identifier[self] . identifier[_image] )) | def restart(self):
"""Restart this Docker container."""
yield from self.manager.query('POST', 'containers/{}/restart'.format(self._cid))
log.info("Docker container '{name}' [{image}] restarted".format(name=self._name, image=self._image)) |
def ccs_normalize(compIM, ccsnorm):
    """ normalize the ccs representation
    Parameters
    ----------
    compIM: 2d array
        The CCS image in CCS representation
    ccsnorm: 2d array
        The normalization matrix in ccs representation
    Returns
    -------
    compIM: 2d array
        The normalized CCS image
    Notes
    -----
    (basically an element wise division for CCS)
    NOTE: when `ccsnorm` is already an ndarray it is modified in place
    (``np.asarray`` does not copy and the slice assignments below mutate it).
    Should probably not be used from outside
    """
    compIM = np.asarray(compIM)
    ccsnorm = np.asarray(ccsnorm)
    ys = ccsnorm.shape[0]
    xs = ccsnorm.shape[1]
    # start with first column
    ccsnorm[2::2, 0] = ccsnorm[1:ys - 1:2, 0]
    # continue with middle columns
    ccsnorm[:, 2::2] = ccsnorm[:, 1:xs - 1:2]
    # finish with last column if the width is even
    # FIX: use ``==`` for integer comparison instead of the identity test
    # ``is 0``, which relies on CPython's small-int caching and raises a
    # SyntaxWarning on modern Python
    if xs % 2 == 0:
        ccsnorm[2::2, xs - 1] = ccsnorm[1:ys - 1:2, xs - 1]
    # avoid 0/0: replace exact zeros with the smallest positive float
    ccsnorm[ccsnorm == 0] = np.nextafter(0., 1., dtype=ccsnorm.dtype)
    res = compIM / ccsnorm
    return res
constant[ normalize the ccs representation
Parameters
----------
compIM: 2d array
The CCS image in CCS representation
ccsnorm: 2d array
The normalization matrix in ccs representation
Returns
-------
compIM: 2d array
The normalized CCS image
Notes
-----
(basically an element wise division for CCS)
Should probably not be used from outside
]
variable[compIM] assign[=] call[name[np].asarray, parameter[name[compIM]]]
variable[ccsnorm] assign[=] call[name[np].asarray, parameter[name[ccsnorm]]]
variable[ys] assign[=] call[name[ccsnorm].shape][constant[0]]
variable[xs] assign[=] call[name[ccsnorm].shape][constant[1]]
call[name[ccsnorm]][tuple[[<ast.Slice object at 0x7da18c4cded0>, <ast.Constant object at 0x7da18c4cc220>]]] assign[=] call[name[ccsnorm]][tuple[[<ast.Slice object at 0x7da204567ee0>, <ast.Constant object at 0x7da204564160>]]]
call[name[ccsnorm]][tuple[[<ast.Slice object at 0x7da204567640>, <ast.Slice object at 0x7da204566110>]]] assign[=] call[name[ccsnorm]][tuple[[<ast.Slice object at 0x7da2045661d0>, <ast.Slice object at 0x7da204567be0>]]]
if compare[binary_operation[name[xs] <ast.Mod object at 0x7da2590d6920> constant[2]] is constant[0]] begin[:]
call[name[ccsnorm]][tuple[[<ast.Slice object at 0x7da204564100>, <ast.BinOp object at 0x7da2045672e0>]]] assign[=] call[name[ccsnorm]][tuple[[<ast.Slice object at 0x7da204565870>, <ast.BinOp object at 0x7da2054a6380>]]]
call[name[ccsnorm]][compare[name[ccsnorm] equal[==] constant[0]]] assign[=] call[name[np].nextafter, parameter[constant[0.0], constant[1.0]]]
variable[res] assign[=] binary_operation[name[compIM] / name[ccsnorm]]
return[name[res]] | keyword[def] identifier[ccs_normalize] ( identifier[compIM] , identifier[ccsnorm] ):
literal[string]
identifier[compIM] = identifier[np] . identifier[asarray] ( identifier[compIM] )
identifier[ccsnorm] = identifier[np] . identifier[asarray] ( identifier[ccsnorm] )
identifier[ys] = identifier[ccsnorm] . identifier[shape] [ literal[int] ]
identifier[xs] = identifier[ccsnorm] . identifier[shape] [ literal[int] ]
identifier[ccsnorm] [ literal[int] :: literal[int] , literal[int] ]= identifier[ccsnorm] [ literal[int] : identifier[ys] - literal[int] : literal[int] , literal[int] ]
identifier[ccsnorm] [:, literal[int] :: literal[int] ]= identifier[ccsnorm] [:, literal[int] : identifier[xs] - literal[int] : literal[int] ]
keyword[if] identifier[xs] % literal[int] keyword[is] literal[int] :
identifier[ccsnorm] [ literal[int] :: literal[int] , identifier[xs] - literal[int] ]= identifier[ccsnorm] [ literal[int] : identifier[ys] - literal[int] : literal[int] , identifier[xs] - literal[int] ]
identifier[ccsnorm] [ identifier[ccsnorm] == literal[int] ]= identifier[np] . identifier[nextafter] ( literal[int] , literal[int] , identifier[dtype] = identifier[ccsnorm] . identifier[dtype] )
identifier[res] = identifier[compIM] / identifier[ccsnorm]
keyword[return] identifier[res] | def ccs_normalize(compIM, ccsnorm):
""" normalize the ccs representation
Parameters
----------
compIM: 2d array
The CCS image in CCS representation
ccsnorm: 2d array
The normalization matrix in ccs representation
Returns
-------
compIM: 2d array
The normalized CCS image
Notes
-----
(basically an element wise division for CCS)
Should probably not be used from outside
"""
compIM = np.asarray(compIM)
ccsnorm = np.asarray(ccsnorm)
ys = ccsnorm.shape[0]
xs = ccsnorm.shape[1]
# start with first column
ccsnorm[2::2, 0] = ccsnorm[1:ys - 1:2, 0]
# continue with middle columns
ccsnorm[:, 2::2] = ccsnorm[:, 1:xs - 1:2]
# finish whith last row if even
if xs % 2 is 0:
ccsnorm[2::2, xs - 1] = ccsnorm[1:ys - 1:2, xs - 1] # depends on [control=['if'], data=[]]
# solve problem with 0/0
ccsnorm[ccsnorm == 0] = np.nextafter(0.0, 1.0, dtype=ccsnorm.dtype)
res = compIM / ccsnorm
return res |
def example_exc_handler(tries_remaining, exception, delay):
    """Example exception handler; prints a warning to stderr.

    tries_remaining: The number of tries remaining.
    exception: The exception instance which was raised.
    delay: Seconds to sleep before the next retry (printed only).
    """
    # FIX: replaced the Python-2-only ``print >> sys.stderr`` statement
    # with the print() function so this also works on Python 3
    print("Caught '%s', %d tries remaining, sleeping for %s seconds" %
          (exception, tries_remaining, delay), file=sys.stderr)
constant[Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised.
]
tuple[[<ast.BinOp object at 0x7da2044c30a0>, <ast.BinOp object at 0x7da2044c10f0>]] | keyword[def] identifier[example_exc_handler] ( identifier[tries_remaining] , identifier[exception] , identifier[delay] ):
literal[string]
identifier[print] >> identifier[sys] . identifier[stderr] , literal[string] %(
identifier[exception] , identifier[tries_remaining] , identifier[delay] ) | def example_exc_handler(tries_remaining, exception, delay):
"""Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised.
"""
(print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % (exception, tries_remaining, delay)) |
def _readall(self):
    """
    Read and return all the bytes from the stream until EOF.

    Returns:
        bytes: Object content
    """
    # fetch the whole object from the OSS bucket; the context manager
    # translates OSS errors into the library's own exceptions
    with _handle_oss_error():
        remote_object = self._bucket.get_object(key=self._key)
        return remote_object.read()
constant[
Read and return all the bytes from the stream until EOF.
Returns:
bytes: Object content
]
with call[name[_handle_oss_error], parameter[]] begin[:]
return[call[call[name[self]._bucket.get_object, parameter[]].read, parameter[]]] | keyword[def] identifier[_readall] ( identifier[self] ):
literal[string]
keyword[with] identifier[_handle_oss_error] ():
keyword[return] identifier[self] . identifier[_bucket] . identifier[get_object] ( identifier[key] = identifier[self] . identifier[_key] ). identifier[read] () | def _readall(self):
"""
Read and return all the bytes from the stream until EOF.
Returns:
bytes: Object content
"""
with _handle_oss_error():
return self._bucket.get_object(key=self._key).read() # depends on [control=['with'], data=[]] |
def chi_square(h1, h2):  # 23 us @array, 49 us @list \w 100
    r"""
    Chi-square distance.

    Measure how unlikely it is that one distribution (histogram) was drawn from the
    other. The Chi-square distance between two histograms :math:`H` and :math:`H'` of size
    :math:`m` is defined as:

    .. math::

        d_{\chi^2}(H, H') = \sum_{m=1}^M
            \frac{
                (H_m - H'_m)^2
            }{
                H_m + H'_m
            }

    *Attributes:*

    - semimetric

    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, 2]`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`

    *Attributes for not-normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`

    *Attributes for not-equal histograms:*

    - not applicable

    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram.

    Returns
    -------
    chi_square : float
        Chi-square distance.
    """
    # FIX: scipy removed its re-exported numpy aliases (seterr, square,
    # isnan, sum); use numpy directly. Imported locally so the module's
    # existing import block stays untouched.
    import numpy
    h1, h2 = __prepare_histogram(h1, h2)
    # 0/0 only occurs when a bin is zero in both histograms, in which case
    # the term should be (and is set to) 0 — so silence the invalid-value
    # warning for the division. errstate also restores the previous error
    # settings even if the division raises, unlike paired seterr() calls.
    with numpy.errstate(invalid='ignore'):
        result = numpy.square(h1 - h2) / (h1 + h2)
    # faster than numpy.nan_to_num, which checks for +inf and -inf also
    result[numpy.isnan(result)] = 0
    return numpy.sum(result)
constant[
Chi-square distance.
Measure how unlikely it is that one distribution (histogram) was drawn from the
other. The Chi-square distance between two histograms :math:`H` and :math:`H'` of size
:math:`m` is defined as:
.. math::
d_{\chi^2}(H, H') = \sum_{m=1}^M
\frac{
(H_m - H'_m)^2
}{
H_m + H'_m
}
*Attributes:*
- semimetric
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, 2]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram.
Returns
-------
chi_square : float
Chi-square distance.
]
<ast.Tuple object at 0x7da2044c2fe0> assign[=] call[name[__prepare_histogram], parameter[name[h1], name[h2]]]
variable[old_err_state] assign[=] call[name[scipy].seterr, parameter[]]
variable[result] assign[=] binary_operation[call[name[scipy].square, parameter[binary_operation[name[h1] - name[h2]]]] / binary_operation[name[h1] + name[h2]]]
call[name[scipy].seterr, parameter[]]
call[name[result]][call[name[scipy].isnan, parameter[name[result]]]] assign[=] constant[0]
return[call[name[scipy].sum, parameter[name[result]]]] | keyword[def] identifier[chi_square] ( identifier[h1] , identifier[h2] ):
literal[string]
identifier[h1] , identifier[h2] = identifier[__prepare_histogram] ( identifier[h1] , identifier[h2] )
identifier[old_err_state] = identifier[scipy] . identifier[seterr] ( identifier[invalid] = literal[string] )
identifier[result] = identifier[scipy] . identifier[square] ( identifier[h1] - identifier[h2] )/( identifier[h1] + identifier[h2] )
identifier[scipy] . identifier[seterr] (** identifier[old_err_state] )
identifier[result] [ identifier[scipy] . identifier[isnan] ( identifier[result] )]= literal[int]
keyword[return] identifier[scipy] . identifier[sum] ( identifier[result] ) | def chi_square(h1, h2): # 23 us @array, 49 us @list \w 100
"\n Chi-square distance.\n \n Measure how unlikely it is that one distribution (histogram) was drawn from the\n other. The Chi-square distance between two histograms :math:`H` and :math:`H'` of size\n :math:`m` is defined as:\n \n .. math::\n \n d_{\\chi^2}(H, H') = \\sum_{m=1}^M\n \\frac{\n (H_m - H'_m)^2\n }{\n H_m + H'_m\n }\n \n *Attributes:*\n\n - semimetric\n \n *Attributes for normalized histograms:*\n\n - :math:`d(H, H')\\in[0, 2]`\n - :math:`d(H, H) = 0`\n - :math:`d(H, H') = d(H', H)`\n \n *Attributes for not-normalized histograms:*\n\n - :math:`d(H, H')\\in[0, \\infty)`\n - :math:`d(H, H) = 0`\n - :math:`d(H, H') = d(H', H)`\n \n *Attributes for not-equal histograms:*\n\n - not applicable \n \n Parameters\n ----------\n h1 : sequence\n The first histogram.\n h2 : sequence\n The second histogram.\n \n Returns\n -------\n chi_square : float\n Chi-square distance.\n "
(h1, h2) = __prepare_histogram(h1, h2)
old_err_state = scipy.seterr(invalid='ignore') # divide through zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0
result = scipy.square(h1 - h2) / (h1 + h2)
scipy.seterr(**old_err_state)
result[scipy.isnan(result)] = 0 # faster than scipy.nan_to_num, which checks for +inf and -inf also
return scipy.sum(result) |
def drilldown_tree(self, session=None, json=False, json_fields=None):
    """ Generate a branch of the tree rooted at the current node.

    For example:

        node7.drilldown_tree()

    .. code::

        level           Nested sets example

        1                    1(1)22       ---------------------
                _______________|_________|_________            |
               |               |         |         |           |
        2    2(2)5           6(4)11      |      12(7)21        |
               |               ^         |         ^           |
        3    3(3)4       7(5)8   9(6)10  |    13(8)16 17(10)20 |
                                         |       |        |    |
        4                                |     14(9)15 18(11)19|
                                         |                     |
                                          ---------------------

    Example in tests:

    * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
    """
    # fall back to the session this node is attached to
    session = session or object_session(self)
    return self.get_tree(session,
                         json=json,
                         json_fields=json_fields,
                         query=self._drilldown_query)
constant[ This method generate a branch from a tree, begining with current
node.
For example:
node7.drilldown_tree()
.. code::
level Nested sets example
1 1(1)22 ---------------------
_______________|_________|_________ |
| | | | |
2 2(2)5 6(4)11 | 12(7)21 |
| ^ | ^ |
3 3(3)4 7(5)8 9(6)10 | 13(8)16 17(10)20 |
| | | |
4 | 14(9)15 18(11)19 |
| |
---------------------
Example in tests:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
]
if <ast.UnaryOp object at 0x7da1b10c1c90> begin[:]
variable[session] assign[=] call[name[object_session], parameter[name[self]]]
return[call[name[self].get_tree, parameter[name[session]]]] | keyword[def] identifier[drilldown_tree] ( identifier[self] , identifier[session] = keyword[None] , identifier[json] = keyword[False] , identifier[json_fields] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[session] :
identifier[session] = identifier[object_session] ( identifier[self] )
keyword[return] identifier[self] . identifier[get_tree] (
identifier[session] ,
identifier[json] = identifier[json] ,
identifier[json_fields] = identifier[json_fields] ,
identifier[query] = identifier[self] . identifier[_drilldown_query]
) | def drilldown_tree(self, session=None, json=False, json_fields=None):
""" This method generate a branch from a tree, begining with current
node.
For example:
node7.drilldown_tree()
.. code::
level Nested sets example
1 1(1)22 ---------------------
_______________|_________|_________ |
| | | | |
2 2(2)5 6(4)11 | 12(7)21 |
| ^ | ^ |
3 3(3)4 7(5)8 9(6)10 | 13(8)16 17(10)20 |
| | | |
4 | 14(9)15 18(11)19 |
| |
---------------------
Example in tests:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
"""
if not session:
session = object_session(self) # depends on [control=['if'], data=[]]
return self.get_tree(session, json=json, json_fields=json_fields, query=self._drilldown_query) |
def get_files_by_name(self, name, parent=None):
    """
    Gets all the files references that have the given name, under the specified parent PBXGroup object or
    PBXGroup id.

    :param name: name of the file to be retrieved
    :param parent: PBXGroup that should be used to narrow the search or None to retrieve files from all project
    :return: List of all PBXFileReference that match the name and parent criteria.
    """
    # resolve a group id into its PBXGroup object, if a parent was given
    if parent is not None:
        parent = self._get_parent_group(parent)

    # keep every file reference whose name matches and which lives under
    # the requested parent (or anywhere, when no parent was given)
    return [file_ref
            for file_ref in self.objects.get_objects_in_section(u'PBXFileReference')
            if file_ref.get_name() == name
            and (parent is None or parent.has_child(file_ref.get_id()))]
constant[
Gets all the files references that have the given name, under the specified parent PBXGroup object or
PBXGroup id.
:param name: name of the file to be retrieved
:param parent: PBXGroup that should be used to narrow the search or None to retrieve files from all project
:return: List of all PBXFileReference that match the name and parent criteria.
]
if compare[name[parent] is_not constant[None]] begin[:]
variable[parent] assign[=] call[name[self]._get_parent_group, parameter[name[parent]]]
variable[files] assign[=] list[[]]
for taget[name[file_ref]] in starred[call[name[self].objects.get_objects_in_section, parameter[constant[PBXFileReference]]]] begin[:]
if <ast.BoolOp object at 0x7da2054a4520> begin[:]
call[name[files].append, parameter[name[file_ref]]]
return[name[files]] | keyword[def] identifier[get_files_by_name] ( identifier[self] , identifier[name] , identifier[parent] = keyword[None] ):
literal[string]
keyword[if] identifier[parent] keyword[is] keyword[not] keyword[None] :
identifier[parent] = identifier[self] . identifier[_get_parent_group] ( identifier[parent] )
identifier[files] =[]
keyword[for] identifier[file_ref] keyword[in] identifier[self] . identifier[objects] . identifier[get_objects_in_section] ( literal[string] ):
keyword[if] identifier[file_ref] . identifier[get_name] ()== identifier[name] keyword[and] ( identifier[parent] keyword[is] keyword[None] keyword[or] identifier[parent] . identifier[has_child] ( identifier[file_ref] . identifier[get_id] ())):
identifier[files] . identifier[append] ( identifier[file_ref] )
keyword[return] identifier[files] | def get_files_by_name(self, name, parent=None):
"""
Gets all the files references that have the given name, under the specified parent PBXGroup object or
PBXGroup id.
:param name: name of the file to be retrieved
:param parent: PBXGroup that should be used to narrow the search or None to retrieve files from all project
:return: List of all PBXFileReference that match the name and parent criteria.
"""
if parent is not None:
parent = self._get_parent_group(parent) # depends on [control=['if'], data=['parent']]
files = []
for file_ref in self.objects.get_objects_in_section(u'PBXFileReference'):
if file_ref.get_name() == name and (parent is None or parent.has_child(file_ref.get_id())):
files.append(file_ref) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file_ref']]
return files |
def path_for(self, *args):
    """Join *args* under ``_root_path``.

    If the first component is already an absolute path, the root is not
    prepended. Falls back to the current working directory when
    ``_root_path`` is unset/empty.
    """
    # FIX: use os.path.isabs instead of startswith(os.path.sep) so
    # Windows-style absolute paths (e.g. "C:\\x") are also recognized;
    # on POSIX the two tests are equivalent
    if args and os.path.isabs(args[0]):
        return os.path.join(*args)
    return os.path.join(self._root_path or os.getcwd(), *args)
constant[Path containing _root_path]
if <ast.BoolOp object at 0x7da1b1703940> begin[:]
return[call[name[os].path.join, parameter[<ast.Starred object at 0x7da1b1720970>]]]
return[call[name[os].path.join, parameter[<ast.BoolOp object at 0x7da1b17d6080>, <ast.Starred object at 0x7da1b17d4160>]]] | keyword[def] identifier[path_for] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[args] keyword[and] identifier[args] [ literal[int] ]. identifier[startswith] ( identifier[os] . identifier[path] . identifier[sep] ):
keyword[return] identifier[os] . identifier[path] . identifier[join] (* identifier[args] )
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_root_path] keyword[or] identifier[os] . identifier[getcwd] (),* identifier[args] ) | def path_for(self, *args):
"""Path containing _root_path"""
if args and args[0].startswith(os.path.sep):
return os.path.join(*args) # depends on [control=['if'], data=[]]
return os.path.join(self._root_path or os.getcwd(), *args) |
def _extract_phasedlc(cpdict, key, idx):
    '''Pull the plot, period, epoch, and optional LC-fit summary for the
    idx-th phased light curve of period-finder method `key` out of a
    checkplot dict. Missing entries come back as None values.
    '''
    entry = cpdict[key][idx] if idx in cpdict[key] else None

    if isinstance(entry, dict):
        plot = entry['plot']
        period = float(entry['period'])
        epoch = float(entry['epoch'])
    else:
        plot, period, epoch = None, None, None

    # the associated fit info for this period, if it exists
    lcfit = None
    if isinstance(entry, dict) and isinstance(entry.get('lcfit'), dict):
        fit = entry['lcfit']
        lcfit = {
            'method': fit['fittype'],
            'redchisq': fit['fitredchisq'],
            'chisq': fit['fitchisq'],
            'params': (fit['fitinfo']['finalparams']
                       if 'finalparams' in fit['fitinfo'] else None),
        }

    return {'plot': plot,
            'period': period,
            'epoch': epoch,
            'lcfit': lcfit}


def _load_neighbors(cpdict, pfmethods):
    '''Build the list of neighbor-info dicts (coordinates, mag/color
    diffs, magseries plot, phased LCs) from a checkplot dict.
    '''
    neighbors = []

    # FIX: the original guard was ``(... and len(...)) > 0`` — the
    # parenthesis closed before the comparison; it worked only by accident
    if ('neighbors' in cpdict and
        cpdict['neighbors'] is not None and
        len(cpdict['neighbors']) > 0):

        # get each neighbor, its info, and its phased LCs
        for nbr in cpdict['neighbors']:

            thisnbrdict = {
                'objectid': nbr['objectid'],
                'objectinfo': {
                    'ra': nbr['ra'],
                    'decl': nbr['decl'],
                    'xpix': nbr['xpix'],
                    'ypix': nbr['ypix'],
                    'distarcsec': nbr['dist'],
                    'magdiffs': nbr.get('magdiffs'),
                    'colordiffs': nbr.get('colordiffs'),
                }
            }

            try:
                thisnbrdict['magseries'] = nbr['magseries']['plot']
            except Exception as e:
                LOGGER.error(
                    "could not load magseries plot for "
                    "neighbor %s for object %s"
                    % (nbr['objectid'], cpdict['objectid'])
                )

            try:
                for pfm in pfmethods:
                    if pfm in nbr:
                        thisnbrdict[pfm] = {
                            'plot': nbr[pfm][0]['plot'],
                            'period': nbr[pfm][0]['period'],
                            'epoch': nbr[pfm][0]['epoch'],
                        }
            except Exception as e:
                LOGGER.error(
                    "could not load phased LC plots for "
                    "neighbor %s for object %s"
                    % (nbr['objectid'], cpdict['objectid'])
                )

            neighbors.append(thisnbrdict)

    return neighbors


def get(self):
    '''This handles GET requests.

    Returns the requested checkplot pickle's information as JSON.
    Requires a pre-shared secret `key` argument for the operation to
    complete successfully. This is obtained from a command-line argument.
    '''

    # check the pre-shared secret key before doing anything else
    provided_key = self.get_argument('key', default=None)

    if not provided_key:
        LOGGER.error('standalone URL hit but no secret key provided')
        retdict = {'status': 'error',
                   'message': ('standalone URL hit but '
                               'no secret key provided'),
                   'result': None,
                   'readonly': True}
        self.set_status(401)
        self.write(retdict)
        raise tornado.web.Finish()
    else:
        provided_key = xhtml_escape(provided_key)
        if not _time_independent_equals(provided_key, self.secret):
            LOGGER.error('secret key provided does not match known key')
            # FIX: report the actual failure; this branch previously
            # reused the "no secret key provided" message
            retdict = {'status': 'error',
                       'message': ('secret key provided does not '
                                   'match known key'),
                       'result': None,
                       'readonly': True}
            self.set_status(401)
            self.write(retdict)
            raise tornado.web.Finish()

    #
    # actually start work here
    #
    LOGGER.info('key auth OK')
    checkplotfname = self.get_argument('cp', default=None)

    if not checkplotfname:
        LOGGER.error('no checkplot file requested')
        resultdict = {'status': 'error',
                      'message': "This checkplot doesn't exist.",
                      'readonly': True,
                      'result': None}
        # FIX: was ``self.status(400)`` — RequestHandler has no such
        # method; that raised AttributeError and produced a 500
        self.set_status(400)
        self.write(resultdict)
        self.finish()
        return

    try:
        # undo the URL-escaping + base64 encoding applied by the requester
        cpfpath = xhtml_escape(
            base64.b64decode(url_unescape(checkplotfname))
        )
    except Exception as e:
        msg = 'could not decode the incoming payload'
        LOGGER.error(msg)
        resultdict = {'status': 'error',
                      'message': msg,
                      'result': None,
                      'readonly': True}
        self.set_status(400)
        self.write(resultdict)
        raise tornado.web.Finish()

    LOGGER.info('loading %s...' % cpfpath)

    if not os.path.exists(cpfpath):
        msg = "couldn't find checkplot %s" % cpfpath
        LOGGER.error(msg)
        resultdict = {'status': 'error',
                      'message': msg,
                      'result': None,
                      'readonly': True}
        self.set_status(404)
        self.write(resultdict)
        raise tornado.web.Finish()

    #
    # load the checkplot
    #
    # this is the async call to the executor
    cpdict = yield self.executor.submit(
        _read_checkplot_picklefile, cpfpath
    )

    LOGGER.info('loaded %s' % cpfpath)

    # break out the initial info
    objectid = cpdict['objectid']
    objectinfo = cpdict['objectinfo']
    varinfo = cpdict['varinfo']

    if 'pfmethods' in cpdict:
        pfmethods = cpdict['pfmethods']
    else:
        pfmethods = [pfm for pfm in PFMETHODS if pfm in cpdict]

    # handle neighbors for this object
    neighbors = _load_neighbors(cpdict, pfmethods)

    # object comments and cross-match results, if any
    objectcomments = cpdict.get('comments')
    objectxmatch = cpdict.get('xmatch')

    # the color-magnitude diagram object, if any
    colormagdiagram = cpdict.get('colormagdiagram')

    # these are base64 which can be provided directly to JS to
    # generate images (neat!)
    finderchart = cpdict.get('finderchart')

    if ('magseries' in cpdict and
        isinstance(cpdict['magseries'], dict) and
        'plot' in cpdict['magseries']):
        magseries = cpdict['magseries']['plot']
        time0 = cpdict['magseries']['times'].min()
        magseries_ndet = cpdict['magseries']['times'].size
    else:
        magseries = None
        time0 = 0.0
        magseries_ndet = 0
        LOGGER.warning(
            "no 'magseries' key present in this "
            "checkplot, some plots may be broken..."
        )

    if 'status' in cpdict:
        cpstatus = cpdict['status']
    else:
        cpstatus = 'unknown, possibly incomplete checkplot'

    # load the uifilters if present
    if 'uifilters' in cpdict:
        uifilters = cpdict['uifilters']
    else:
        uifilters = {'psearch_magfilters': None,
                     'psearch_sigclip': None,
                     'psearch_timefilters': None}

    # this is the initial dict
    resultdict = {
        'status': 'ok',
        'message': 'found checkplot %s' % os.path.basename(cpfpath),
        'readonly': True,
        'result': {
            'time0': '%.3f' % time0,
            'objectid': objectid,
            'objectinfo': objectinfo,
            'colormagdiagram': colormagdiagram,
            'objectcomments': objectcomments,
            'varinfo': varinfo,
            'uifilters': uifilters,
            'neighbors': neighbors,
            'xmatch': objectxmatch,
            'finderchart': finderchart,
            'magseries': magseries,
            # fallback in case objectinfo doesn't have ndet
            'magseries_ndet': magseries_ndet,
            'cpstatus': cpstatus,
            'pfmethods': pfmethods
        }
    }

    # now get the periodograms and the three best phased LCs for each
    # period-finder method
    for key in pfmethods:
        resultdict['result'][key] = {
            'nbestperiods': cpdict[key]['nbestperiods'],
            'periodogram': cpdict[key]['periodogram'],
            'bestperiod': cpdict[key]['bestperiod'],
            'phasedlc0': _extract_phasedlc(cpdict, key, 0),
            'phasedlc1': _extract_phasedlc(cpdict, key, 1),
            'phasedlc2': _extract_phasedlc(cpdict, key, 2),
        }

    self.set_header('Content-Type', 'application/json; charset=UTF-8')
    self.write(resultdict)
    self.finish()
constant[This handles GET requests.
Returns the requested checkplot pickle's information as JSON.
Requires a pre-shared secret `key` argument for the operation to
complete successfully. This is obtained from a command-line argument.
]
variable[provided_key] assign[=] call[name[self].get_argument, parameter[constant[key]]]
if <ast.UnaryOp object at 0x7da1b2347e80> begin[:]
call[name[LOGGER].error, parameter[constant[standalone URL hit but no secret key provided]]]
variable[retdict] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344220>, <ast.Constant object at 0x7da1b2346b30>, <ast.Constant object at 0x7da1b23475e0>, <ast.Constant object at 0x7da1b2346950>], [<ast.Constant object at 0x7da1b2345d20>, <ast.Constant object at 0x7da1b2347100>, <ast.Constant object at 0x7da1b23448e0>, <ast.Constant object at 0x7da1b2346ec0>]]
call[name[self].set_status, parameter[constant[401]]]
call[name[self].write, parameter[name[retdict]]]
<ast.Raise object at 0x7da1b2345690>
call[name[LOGGER].info, parameter[constant[key auth OK]]]
variable[checkplotfname] assign[=] call[name[self].get_argument, parameter[constant[cp]]]
if name[checkplotfname] begin[:]
<ast.Try object at 0x7da18c4cff10>
call[name[LOGGER].info, parameter[binary_operation[constant[loading %s...] <ast.Mod object at 0x7da2590d6920> name[cpfpath]]]]
if <ast.UnaryOp object at 0x7da18c4ce0b0> begin[:]
variable[msg] assign[=] binary_operation[constant[couldn't find checkplot %s] <ast.Mod object at 0x7da2590d6920> name[cpfpath]]
call[name[LOGGER].error, parameter[name[msg]]]
variable[resultdict] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cc3a0>, <ast.Constant object at 0x7da18c4cf280>, <ast.Constant object at 0x7da18c4cf850>, <ast.Constant object at 0x7da18c4ccf70>], [<ast.Constant object at 0x7da18c4cf610>, <ast.Name object at 0x7da18c4ccee0>, <ast.Constant object at 0x7da18c4cf2e0>, <ast.Constant object at 0x7da18c4ce1d0>]]
call[name[self].set_status, parameter[constant[404]]]
call[name[self].write, parameter[name[resultdict]]]
<ast.Raise object at 0x7da18c4ce920>
variable[cpdict] assign[=] <ast.Yield object at 0x7da18c4cf910>
call[name[LOGGER].info, parameter[binary_operation[constant[loaded %s] <ast.Mod object at 0x7da2590d6920> name[cpfpath]]]]
variable[objectid] assign[=] call[name[cpdict]][constant[objectid]]
variable[objectinfo] assign[=] call[name[cpdict]][constant[objectinfo]]
variable[varinfo] assign[=] call[name[cpdict]][constant[varinfo]]
if compare[constant[pfmethods] in name[cpdict]] begin[:]
variable[pfmethods] assign[=] call[name[cpdict]][constant[pfmethods]]
variable[neighbors] assign[=] list[[]]
if compare[<ast.BoolOp object at 0x7da18c4cda50> greater[>] constant[0]] begin[:]
variable[nbrlist] assign[=] call[name[cpdict]][constant[neighbors]]
for taget[name[nbr]] in starred[name[nbrlist]] begin[:]
if compare[constant[magdiffs] in name[nbr]] begin[:]
variable[nbrmagdiffs] assign[=] call[name[nbr]][constant[magdiffs]]
if compare[constant[colordiffs] in name[nbr]] begin[:]
variable[nbrcolordiffs] assign[=] call[name[nbr]][constant[colordiffs]]
variable[thisnbrdict] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cfb80>, <ast.Constant object at 0x7da18c4cf670>], [<ast.Subscript object at 0x7da18c4cde40>, <ast.Dict object at 0x7da18c4cd7b0>]]
<ast.Try object at 0x7da18c4cd2d0>
<ast.Try object at 0x7da18f00d660>
call[name[neighbors].append, parameter[name[thisnbrdict]]]
if compare[constant[comments] in name[cpdict]] begin[:]
variable[objectcomments] assign[=] call[name[cpdict]][constant[comments]]
if compare[constant[xmatch] in name[cpdict]] begin[:]
variable[objectxmatch] assign[=] call[name[cpdict]][constant[xmatch]]
if compare[constant[colormagdiagram] in name[cpdict]] begin[:]
variable[colormagdiagram] assign[=] call[name[cpdict]][constant[colormagdiagram]]
if compare[constant[finderchart] in name[cpdict]] begin[:]
variable[finderchart] assign[=] call[name[cpdict]][constant[finderchart]]
if <ast.BoolOp object at 0x7da18f00e7a0> begin[:]
variable[magseries] assign[=] call[call[name[cpdict]][constant[magseries]]][constant[plot]]
variable[time0] assign[=] call[call[call[name[cpdict]][constant[magseries]]][constant[times]].min, parameter[]]
variable[magseries_ndet] assign[=] call[call[name[cpdict]][constant[magseries]]][constant[times]].size
if compare[constant[status] in name[cpdict]] begin[:]
variable[cpstatus] assign[=] call[name[cpdict]][constant[status]]
if compare[constant[uifilters] in name[cpdict]] begin[:]
variable[uifilters] assign[=] call[name[cpdict]][constant[uifilters]]
variable[resultdict] assign[=] dictionary[[<ast.Constant object at 0x7da20c76f040>, <ast.Constant object at 0x7da20c76f580>, <ast.Constant object at 0x7da20c76db40>, <ast.Constant object at 0x7da20c76c8e0>], [<ast.Constant object at 0x7da20c76ed10>, <ast.BinOp object at 0x7da20c76df30>, <ast.Constant object at 0x7da20c76f280>, <ast.Dict object at 0x7da20c76c4f0>]]
for taget[name[key]] in starred[name[pfmethods]] begin[:]
variable[periodogram] assign[=] call[call[name[cpdict]][name[key]]][constant[periodogram]]
if <ast.BoolOp object at 0x7da20c76e6e0> begin[:]
variable[phasedlc0plot] assign[=] call[call[call[name[cpdict]][name[key]]][constant[0]]][constant[plot]]
variable[phasedlc0period] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[0]]][constant[period]]]]
variable[phasedlc0epoch] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[0]]][constant[epoch]]]]
if <ast.BoolOp object at 0x7da20c76ee60> begin[:]
variable[phasedlc0fit] assign[=] dictionary[[<ast.Constant object at 0x7da20c76cb50>, <ast.Constant object at 0x7da20c76d5d0>, <ast.Constant object at 0x7da20c76e680>, <ast.Constant object at 0x7da20c76e140>], [<ast.Subscript object at 0x7da20c76f7c0>, <ast.Subscript object at 0x7da20c76d330>, <ast.Subscript object at 0x7da20c76c2b0>, <ast.IfExp object at 0x7da20c76e770>]]
if <ast.BoolOp object at 0x7da18ede5600> begin[:]
variable[phasedlc1plot] assign[=] call[call[call[name[cpdict]][name[key]]][constant[1]]][constant[plot]]
variable[phasedlc1period] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[1]]][constant[period]]]]
variable[phasedlc1epoch] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[1]]][constant[epoch]]]]
if <ast.BoolOp object at 0x7da18ede6c80> begin[:]
variable[phasedlc1fit] assign[=] dictionary[[<ast.Constant object at 0x7da18ede7ee0>, <ast.Constant object at 0x7da18ede4f70>, <ast.Constant object at 0x7da18ede6c20>, <ast.Constant object at 0x7da18ede6350>], [<ast.Subscript object at 0x7da18ede6710>, <ast.Subscript object at 0x7da18ede6140>, <ast.Subscript object at 0x7da18ede68c0>, <ast.IfExp object at 0x7da18ede4eb0>]]
if <ast.BoolOp object at 0x7da207f021d0> begin[:]
variable[phasedlc2plot] assign[=] call[call[call[name[cpdict]][name[key]]][constant[2]]][constant[plot]]
variable[phasedlc2period] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[2]]][constant[period]]]]
variable[phasedlc2epoch] assign[=] call[name[float], parameter[call[call[call[name[cpdict]][name[key]]][constant[2]]][constant[epoch]]]]
if <ast.BoolOp object at 0x7da207f00af0> begin[:]
variable[phasedlc2fit] assign[=] dictionary[[<ast.Constant object at 0x7da207f9b430>, <ast.Constant object at 0x7da207f99c30>, <ast.Constant object at 0x7da207f9b250>, <ast.Constant object at 0x7da207f99210>], [<ast.Subscript object at 0x7da207f99db0>, <ast.Subscript object at 0x7da207f989d0>, <ast.Subscript object at 0x7da207f9b700>, <ast.IfExp object at 0x7da207f9b850>]]
call[call[name[resultdict]][constant[result]]][name[key]] assign[=] dictionary[[<ast.Constant object at 0x7da207f9b4c0>, <ast.Constant object at 0x7da207f99f30>, <ast.Constant object at 0x7da207f9a110>, <ast.Constant object at 0x7da207f99420>, <ast.Constant object at 0x7da207f9b400>, <ast.Constant object at 0x7da207f99600>], [<ast.Subscript object at 0x7da207f99ff0>, <ast.Name object at 0x7da207f9a980>, <ast.Subscript object at 0x7da207f9a860>, <ast.Dict object at 0x7da207f9a9e0>, <ast.Dict object at 0x7da207f9bdc0>, <ast.Dict object at 0x7da207f9bb20>]]
call[name[self].set_header, parameter[constant[Content-Type], constant[application/json; charset=UTF-8]]]
call[name[self].write, parameter[name[resultdict]]]
call[name[self].finish, parameter[]] | keyword[def] identifier[get] ( identifier[self] ):
literal[string]
identifier[provided_key] = identifier[self] . identifier[get_argument] ( literal[string] , identifier[default] = keyword[None] )
keyword[if] keyword[not] identifier[provided_key] :
identifier[LOGGER] . identifier[error] ( literal[string] )
identifier[retdict] ={ literal[string] : literal[string] ,
literal[string] :( literal[string]
literal[string] ),
literal[string] : keyword[None] ,
literal[string] : keyword[True] }
identifier[self] . identifier[set_status] ( literal[int] )
identifier[self] . identifier[write] ( identifier[retdict] )
keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] ()
keyword[else] :
identifier[provided_key] = identifier[xhtml_escape] ( identifier[provided_key] )
keyword[if] keyword[not] identifier[_time_independent_equals] ( identifier[provided_key] ,
identifier[self] . identifier[secret] ):
identifier[LOGGER] . identifier[error] ( literal[string] )
identifier[retdict] ={ literal[string] : literal[string] ,
literal[string] :( literal[string]
literal[string] ),
literal[string] : keyword[None] ,
literal[string] : keyword[True] }
identifier[self] . identifier[set_status] ( literal[int] )
identifier[self] . identifier[write] ( identifier[retdict] )
keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] ()
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[checkplotfname] = identifier[self] . identifier[get_argument] ( literal[string] , identifier[default] = keyword[None] )
keyword[if] identifier[checkplotfname] :
keyword[try] :
identifier[cpfpath] = identifier[xhtml_escape] (
identifier[base64] . identifier[b64decode] ( identifier[url_unescape] ( identifier[checkplotfname] ))
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[msg] = literal[string]
identifier[LOGGER] . identifier[error] ( identifier[msg] )
identifier[resultdict] ={ literal[string] : literal[string] ,
literal[string] : identifier[msg] ,
literal[string] : keyword[None] ,
literal[string] : keyword[True] }
identifier[self] . identifier[set_status] ( literal[int] )
identifier[self] . identifier[write] ( identifier[resultdict] )
keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] ()
identifier[LOGGER] . identifier[info] ( literal[string] % identifier[cpfpath] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[cpfpath] ):
identifier[msg] = literal[string] % identifier[cpfpath]
identifier[LOGGER] . identifier[error] ( identifier[msg] )
identifier[resultdict] ={ literal[string] : literal[string] ,
literal[string] : identifier[msg] ,
literal[string] : keyword[None] ,
literal[string] : keyword[True] }
identifier[self] . identifier[set_status] ( literal[int] )
identifier[self] . identifier[write] ( identifier[resultdict] )
keyword[raise] identifier[tornado] . identifier[web] . identifier[Finish] ()
identifier[cpdict] = keyword[yield] identifier[self] . identifier[executor] . identifier[submit] (
identifier[_read_checkplot_picklefile] , identifier[cpfpath]
)
identifier[LOGGER] . identifier[info] ( literal[string] % identifier[cpfpath] )
identifier[objectid] = identifier[cpdict] [ literal[string] ]
identifier[objectinfo] = identifier[cpdict] [ literal[string] ]
identifier[varinfo] = identifier[cpdict] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[pfmethods] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[pfmethods] =[]
keyword[for] identifier[pfm] keyword[in] identifier[PFMETHODS] :
keyword[if] identifier[pfm] keyword[in] identifier[cpdict] :
identifier[pfmethods] . identifier[append] ( identifier[pfm] )
identifier[neighbors] =[]
keyword[if] ( literal[string] keyword[in] identifier[cpdict] keyword[and]
identifier[cpdict] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[len] ( identifier[cpdict] [ literal[string] ]))> literal[int] :
identifier[nbrlist] = identifier[cpdict] [ literal[string] ]
keyword[for] identifier[nbr] keyword[in] identifier[nbrlist] :
keyword[if] literal[string] keyword[in] identifier[nbr] :
identifier[nbrmagdiffs] = identifier[nbr] [ literal[string] ]
keyword[else] :
identifier[nbrmagdiffs] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[nbr] :
identifier[nbrcolordiffs] = identifier[nbr] [ literal[string] ]
keyword[else] :
identifier[nbrcolordiffs] = keyword[None]
identifier[thisnbrdict] ={
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] :{
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbr] [ literal[string] ],
literal[string] : identifier[nbrmagdiffs] ,
literal[string] : identifier[nbrcolordiffs]
}
}
keyword[try] :
identifier[nbr_magseries] = identifier[nbr] [ literal[string] ][ literal[string] ]
identifier[thisnbrdict] [ literal[string] ]= identifier[nbr_magseries]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[LOGGER] . identifier[error] (
literal[string]
literal[string]
%( identifier[nbr] [ literal[string] ],
identifier[cpdict] [ literal[string] ])
)
keyword[try] :
keyword[for] identifier[pfm] keyword[in] identifier[pfmethods] :
keyword[if] identifier[pfm] keyword[in] identifier[nbr] :
identifier[thisnbrdict] [ identifier[pfm] ]={
literal[string] : identifier[nbr] [ identifier[pfm] ][ literal[int] ][ literal[string] ],
literal[string] : identifier[nbr] [ identifier[pfm] ][ literal[int] ][ literal[string] ],
literal[string] : identifier[nbr] [ identifier[pfm] ][ literal[int] ][ literal[string] ]
}
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[LOGGER] . identifier[error] (
literal[string]
literal[string]
%( identifier[nbr] [ literal[string] ],
identifier[cpdict] [ literal[string] ])
)
identifier[neighbors] . identifier[append] ( identifier[thisnbrdict] )
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[objectcomments] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[objectcomments] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[objectxmatch] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[objectxmatch] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[colormagdiagram] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[colormagdiagram] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[finderchart] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[finderchart] = keyword[None]
keyword[if] ( literal[string] keyword[in] identifier[cpdict] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ literal[string] ], identifier[dict] ) keyword[and]
literal[string] keyword[in] identifier[cpdict] [ literal[string] ]):
identifier[magseries] = identifier[cpdict] [ literal[string] ][ literal[string] ]
identifier[time0] = identifier[cpdict] [ literal[string] ][ literal[string] ]. identifier[min] ()
identifier[magseries_ndet] = identifier[cpdict] [ literal[string] ][ literal[string] ]. identifier[size]
keyword[else] :
identifier[magseries] = keyword[None]
identifier[time0] = literal[int]
identifier[magseries_ndet] = literal[int]
identifier[LOGGER] . identifier[warning] (
literal[string]
literal[string]
)
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[cpstatus] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[cpstatus] = literal[string]
keyword[if] literal[string] keyword[in] identifier[cpdict] :
identifier[uifilters] = identifier[cpdict] [ literal[string] ]
keyword[else] :
identifier[uifilters] ={ literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] }
identifier[resultdict] ={
literal[string] : literal[string] ,
literal[string] : literal[string] % identifier[os] . identifier[path] . identifier[basename] ( identifier[cpfpath] ),
literal[string] : keyword[True] ,
literal[string] :{
literal[string] : literal[string] % identifier[time0] ,
literal[string] : identifier[objectid] ,
literal[string] : identifier[objectinfo] ,
literal[string] : identifier[colormagdiagram] ,
literal[string] : identifier[objectcomments] ,
literal[string] : identifier[varinfo] ,
literal[string] : identifier[uifilters] ,
literal[string] : identifier[neighbors] ,
literal[string] : identifier[objectxmatch] ,
literal[string] : identifier[finderchart] ,
literal[string] : identifier[magseries] ,
literal[string] : identifier[magseries_ndet] ,
literal[string] : identifier[cpstatus] ,
literal[string] : identifier[pfmethods]
}
}
keyword[for] identifier[key] keyword[in] identifier[pfmethods] :
identifier[periodogram] = identifier[cpdict] [ identifier[key] ][ literal[string] ]
keyword[if] literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and] identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ):
identifier[phasedlc0plot] = identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ]
identifier[phasedlc0period] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
identifier[phasedlc0epoch] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
keyword[else] :
identifier[phasedlc0plot] = keyword[None]
identifier[phasedlc0period] = keyword[None]
identifier[phasedlc0epoch] = keyword[None]
keyword[if] ( literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ) keyword[and]
literal[string] keyword[in] identifier[cpdict] [ identifier[key] ][ literal[int] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ], identifier[dict] )):
identifier[phasedlc0fit] ={
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][
literal[string]
][ literal[string] ][ literal[string] ] keyword[if]
literal[string] keyword[in]
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ] keyword[else] keyword[None]
)
}
keyword[else] :
identifier[phasedlc0fit] = keyword[None]
keyword[if] literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and] identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ):
identifier[phasedlc1plot] = identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ]
identifier[phasedlc1period] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
identifier[phasedlc1epoch] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
keyword[else] :
identifier[phasedlc1plot] = keyword[None]
identifier[phasedlc1period] = keyword[None]
identifier[phasedlc1epoch] = keyword[None]
keyword[if] ( literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ) keyword[and]
literal[string] keyword[in] identifier[cpdict] [ identifier[key] ][ literal[int] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ], identifier[dict] )):
identifier[phasedlc1fit] ={
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][
literal[string]
][ literal[string] ][ literal[string] ] keyword[if]
literal[string] keyword[in]
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ] keyword[else] keyword[None]
)
}
keyword[else] :
identifier[phasedlc1fit] = keyword[None]
keyword[if] literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and] identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ):
identifier[phasedlc2plot] = identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ]
identifier[phasedlc2period] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
identifier[phasedlc2epoch] = identifier[float] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ])
keyword[else] :
identifier[phasedlc2plot] = keyword[None]
identifier[phasedlc2period] = keyword[None]
identifier[phasedlc2epoch] = keyword[None]
keyword[if] ( literal[int] keyword[in] identifier[cpdict] [ identifier[key] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ], identifier[dict] ) keyword[and]
literal[string] keyword[in] identifier[cpdict] [ identifier[key] ][ literal[int] ] keyword[and]
identifier[isinstance] ( identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ], identifier[dict] )):
identifier[phasedlc2fit] ={
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ]
),
literal[string] :(
identifier[cpdict] [ identifier[key] ][ literal[int] ][
literal[string]
][ literal[string] ][ literal[string] ] keyword[if]
literal[string] keyword[in]
identifier[cpdict] [ identifier[key] ][ literal[int] ][ literal[string] ][ literal[string] ] keyword[else] keyword[None]
)
}
keyword[else] :
identifier[phasedlc2fit] = keyword[None]
identifier[resultdict] [ literal[string] ][ identifier[key] ]={
literal[string] : identifier[cpdict] [ identifier[key] ][ literal[string] ],
literal[string] : identifier[periodogram] ,
literal[string] : identifier[cpdict] [ identifier[key] ][ literal[string] ],
literal[string] :{
literal[string] : identifier[phasedlc0plot] ,
literal[string] : identifier[phasedlc0period] ,
literal[string] : identifier[phasedlc0epoch] ,
literal[string] : identifier[phasedlc0fit] ,
},
literal[string] :{
literal[string] : identifier[phasedlc1plot] ,
literal[string] : identifier[phasedlc1period] ,
literal[string] : identifier[phasedlc1epoch] ,
literal[string] : identifier[phasedlc1fit] ,
},
literal[string] :{
literal[string] : identifier[phasedlc2plot] ,
literal[string] : identifier[phasedlc2period] ,
literal[string] : identifier[phasedlc2epoch] ,
literal[string] : identifier[phasedlc2fit] ,
},
}
identifier[self] . identifier[set_header] ( literal[string] , literal[string] )
identifier[self] . identifier[write] ( identifier[resultdict] )
identifier[self] . identifier[finish] ()
keyword[else] :
identifier[LOGGER] . identifier[error] ( literal[string] )
identifier[resultdict] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : keyword[True] ,
literal[string] : keyword[None] }
identifier[self] . identifier[status] ( literal[int] )
identifier[self] . identifier[write] ( identifier[resultdict] )
identifier[self] . identifier[finish] () | def get(self):
"""This handles GET requests.
Returns the requested checkplot pickle's information as JSON.
Requires a pre-shared secret `key` argument for the operation to
complete successfully. This is obtained from a command-line argument.
"""
provided_key = self.get_argument('key', default=None)
if not provided_key:
LOGGER.error('standalone URL hit but no secret key provided')
retdict = {'status': 'error', 'message': 'standalone URL hit but no secret key provided', 'result': None, 'readonly': True}
self.set_status(401)
self.write(retdict)
raise tornado.web.Finish() # depends on [control=['if'], data=[]]
else:
provided_key = xhtml_escape(provided_key)
if not _time_independent_equals(provided_key, self.secret):
LOGGER.error('secret key provided does not match known key')
retdict = {'status': 'error', 'message': 'standalone URL hit but no secret key provided', 'result': None, 'readonly': True}
self.set_status(401)
self.write(retdict)
raise tornado.web.Finish() # depends on [control=['if'], data=[]]
#
# actually start work here
#
LOGGER.info('key auth OK')
checkplotfname = self.get_argument('cp', default=None)
if checkplotfname:
try:
# do the usual safing
cpfpath = xhtml_escape(base64.b64decode(url_unescape(checkplotfname))) # depends on [control=['try'], data=[]]
except Exception as e:
msg = 'could not decode the incoming payload'
LOGGER.error(msg)
resultdict = {'status': 'error', 'message': msg, 'result': None, 'readonly': True}
self.set_status(400)
self.write(resultdict)
raise tornado.web.Finish() # depends on [control=['except'], data=[]]
LOGGER.info('loading %s...' % cpfpath)
if not os.path.exists(cpfpath):
msg = "couldn't find checkplot %s" % cpfpath
LOGGER.error(msg)
resultdict = {'status': 'error', 'message': msg, 'result': None, 'readonly': True}
self.set_status(404)
self.write(resultdict)
raise tornado.web.Finish() # depends on [control=['if'], data=[]]
#
# load the checkplot
#
# this is the async call to the executor
cpdict = (yield self.executor.submit(_read_checkplot_picklefile, cpfpath))
#####################################
## continue after we're good to go ##
#####################################
LOGGER.info('loaded %s' % cpfpath)
# break out the initial info
objectid = cpdict['objectid']
objectinfo = cpdict['objectinfo']
varinfo = cpdict['varinfo']
if 'pfmethods' in cpdict:
pfmethods = cpdict['pfmethods'] # depends on [control=['if'], data=['cpdict']]
else:
pfmethods = []
for pfm in PFMETHODS:
if pfm in cpdict:
pfmethods.append(pfm) # depends on [control=['if'], data=['pfm']] # depends on [control=['for'], data=['pfm']]
# handle neighbors for this object
neighbors = []
if ('neighbors' in cpdict and cpdict['neighbors'] is not None and len(cpdict['neighbors'])) > 0:
nbrlist = cpdict['neighbors']
# get each neighbor, its info, and its phased LCs
for nbr in nbrlist:
if 'magdiffs' in nbr:
nbrmagdiffs = nbr['magdiffs'] # depends on [control=['if'], data=['nbr']]
else:
nbrmagdiffs = None
if 'colordiffs' in nbr:
nbrcolordiffs = nbr['colordiffs'] # depends on [control=['if'], data=['nbr']]
else:
nbrcolordiffs = None
thisnbrdict = {'objectid': nbr['objectid'], 'objectinfo': {'ra': nbr['ra'], 'decl': nbr['decl'], 'xpix': nbr['xpix'], 'ypix': nbr['ypix'], 'distarcsec': nbr['dist'], 'magdiffs': nbrmagdiffs, 'colordiffs': nbrcolordiffs}}
try:
nbr_magseries = nbr['magseries']['plot']
thisnbrdict['magseries'] = nbr_magseries # depends on [control=['try'], data=[]]
except Exception as e:
LOGGER.error('could not load magseries plot for neighbor %s for object %s' % (nbr['objectid'], cpdict['objectid'])) # depends on [control=['except'], data=[]]
try:
for pfm in pfmethods:
if pfm in nbr:
thisnbrdict[pfm] = {'plot': nbr[pfm][0]['plot'], 'period': nbr[pfm][0]['period'], 'epoch': nbr[pfm][0]['epoch']} # depends on [control=['if'], data=['pfm', 'nbr']] # depends on [control=['for'], data=['pfm']] # depends on [control=['try'], data=[]]
except Exception as e:
LOGGER.error('could not load phased LC plots for neighbor %s for object %s' % (nbr['objectid'], cpdict['objectid'])) # depends on [control=['except'], data=[]]
neighbors.append(thisnbrdict) # depends on [control=['for'], data=['nbr']] # depends on [control=['if'], data=[]]
# load object comments
if 'comments' in cpdict:
objectcomments = cpdict['comments'] # depends on [control=['if'], data=['cpdict']]
else:
objectcomments = None
# load the xmatch results, if any
if 'xmatch' in cpdict:
objectxmatch = cpdict['xmatch'] # depends on [control=['if'], data=['cpdict']]
else:
objectxmatch = None
# load the colormagdiagram object
if 'colormagdiagram' in cpdict:
colormagdiagram = cpdict['colormagdiagram'] # depends on [control=['if'], data=['cpdict']]
else:
colormagdiagram = None
# these are base64 which can be provided directly to JS to
# generate images (neat!)
if 'finderchart' in cpdict:
finderchart = cpdict['finderchart'] # depends on [control=['if'], data=['cpdict']]
else:
finderchart = None
if 'magseries' in cpdict and isinstance(cpdict['magseries'], dict) and ('plot' in cpdict['magseries']):
magseries = cpdict['magseries']['plot']
time0 = cpdict['magseries']['times'].min()
magseries_ndet = cpdict['magseries']['times'].size # depends on [control=['if'], data=[]]
else:
magseries = None
time0 = 0.0
magseries_ndet = 0
LOGGER.warning("no 'magseries' key present in this checkplot, some plots may be broken...")
if 'status' in cpdict:
cpstatus = cpdict['status'] # depends on [control=['if'], data=['cpdict']]
else:
cpstatus = 'unknown, possibly incomplete checkplot'
# load the uifilters if present
if 'uifilters' in cpdict:
uifilters = cpdict['uifilters'] # depends on [control=['if'], data=['cpdict']]
else:
uifilters = {'psearch_magfilters': None, 'psearch_sigclip': None, 'psearch_timefilters': None}
# this is the initial dict
# fallback in case objectinfo doesn't have ndet
resultdict = {'status': 'ok', 'message': 'found checkplot %s' % os.path.basename(cpfpath), 'readonly': True, 'result': {'time0': '%.3f' % time0, 'objectid': objectid, 'objectinfo': objectinfo, 'colormagdiagram': colormagdiagram, 'objectcomments': objectcomments, 'varinfo': varinfo, 'uifilters': uifilters, 'neighbors': neighbors, 'xmatch': objectxmatch, 'finderchart': finderchart, 'magseries': magseries, 'magseries_ndet': magseries_ndet, 'cpstatus': cpstatus, 'pfmethods': pfmethods}}
# now get the periodograms and phased LCs
for key in pfmethods:
# get the periodogram for this method
periodogram = cpdict[key]['periodogram']
# get the phased LC with best period
if 0 in cpdict[key] and isinstance(cpdict[key][0], dict):
phasedlc0plot = cpdict[key][0]['plot']
phasedlc0period = float(cpdict[key][0]['period'])
phasedlc0epoch = float(cpdict[key][0]['epoch']) # depends on [control=['if'], data=[]]
else:
phasedlc0plot = None
phasedlc0period = None
phasedlc0epoch = None
# get the associated fitinfo for this period if it
# exists
if 0 in cpdict[key] and isinstance(cpdict[key][0], dict) and ('lcfit' in cpdict[key][0]) and isinstance(cpdict[key][0]['lcfit'], dict):
phasedlc0fit = {'method': cpdict[key][0]['lcfit']['fittype'], 'redchisq': cpdict[key][0]['lcfit']['fitredchisq'], 'chisq': cpdict[key][0]['lcfit']['fitchisq'], 'params': cpdict[key][0]['lcfit']['fitinfo']['finalparams'] if 'finalparams' in cpdict[key][0]['lcfit']['fitinfo'] else None} # depends on [control=['if'], data=[]]
else:
phasedlc0fit = None
# get the phased LC with 2nd best period
if 1 in cpdict[key] and isinstance(cpdict[key][1], dict):
phasedlc1plot = cpdict[key][1]['plot']
phasedlc1period = float(cpdict[key][1]['period'])
phasedlc1epoch = float(cpdict[key][1]['epoch']) # depends on [control=['if'], data=[]]
else:
phasedlc1plot = None
phasedlc1period = None
phasedlc1epoch = None
# get the associated fitinfo for this period if it
# exists
if 1 in cpdict[key] and isinstance(cpdict[key][1], dict) and ('lcfit' in cpdict[key][1]) and isinstance(cpdict[key][1]['lcfit'], dict):
phasedlc1fit = {'method': cpdict[key][1]['lcfit']['fittype'], 'redchisq': cpdict[key][1]['lcfit']['fitredchisq'], 'chisq': cpdict[key][1]['lcfit']['fitchisq'], 'params': cpdict[key][1]['lcfit']['fitinfo']['finalparams'] if 'finalparams' in cpdict[key][1]['lcfit']['fitinfo'] else None} # depends on [control=['if'], data=[]]
else:
phasedlc1fit = None
# get the phased LC with 3rd best period
if 2 in cpdict[key] and isinstance(cpdict[key][2], dict):
phasedlc2plot = cpdict[key][2]['plot']
phasedlc2period = float(cpdict[key][2]['period'])
phasedlc2epoch = float(cpdict[key][2]['epoch']) # depends on [control=['if'], data=[]]
else:
phasedlc2plot = None
phasedlc2period = None
phasedlc2epoch = None
# get the associated fitinfo for this period if it
# exists
if 2 in cpdict[key] and isinstance(cpdict[key][2], dict) and ('lcfit' in cpdict[key][2]) and isinstance(cpdict[key][2]['lcfit'], dict):
phasedlc2fit = {'method': cpdict[key][2]['lcfit']['fittype'], 'redchisq': cpdict[key][2]['lcfit']['fitredchisq'], 'chisq': cpdict[key][2]['lcfit']['fitchisq'], 'params': cpdict[key][2]['lcfit']['fitinfo']['finalparams'] if 'finalparams' in cpdict[key][2]['lcfit']['fitinfo'] else None} # depends on [control=['if'], data=[]]
else:
phasedlc2fit = None
resultdict['result'][key] = {'nbestperiods': cpdict[key]['nbestperiods'], 'periodogram': periodogram, 'bestperiod': cpdict[key]['bestperiod'], 'phasedlc0': {'plot': phasedlc0plot, 'period': phasedlc0period, 'epoch': phasedlc0epoch, 'lcfit': phasedlc0fit}, 'phasedlc1': {'plot': phasedlc1plot, 'period': phasedlc1period, 'epoch': phasedlc1epoch, 'lcfit': phasedlc1fit}, 'phasedlc2': {'plot': phasedlc2plot, 'period': phasedlc2period, 'epoch': phasedlc2epoch, 'lcfit': phasedlc2fit}} # depends on [control=['for'], data=['key']]
#
# end of processing per pfmethod
#
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.write(resultdict)
self.finish() # depends on [control=['if'], data=[]]
else:
LOGGER.error('no checkplot file requested')
resultdict = {'status': 'error', 'message': "This checkplot doesn't exist.", 'readonly': True, 'result': None}
self.status(400)
self.write(resultdict)
self.finish() |
def edit_distance(wordA, wordB):
    """Compute the Damerau/Levenshtein-style edit distance between two words.

    Ref: https://en.wikipedia.org/wiki/Edit_distance
    Ref: https://en.wikipedia.org/wiki/Levenshtein_distance

    Inputs may be strings (split into Tamil letters) or pre-split letter
    lists; the return value is the minimum number of single-letter
    insertions, deletions and substitutions turning wordA into wordB.
    """
    lettersA = wordA if type(wordA) is list else tamil.utf8.get_letters(wordA)
    lettersB = wordB if type(wordB) is list else tamil.utf8.get_letters(wordB)
    n_A = len(lettersA)
    n_B = len(lettersB)
    # table[i][j] = distance between the first i letters of A and the
    # first j letters of B.
    table = [[0] * (n_B + 1) for _ in range(n_A + 1)]
    # First row: build the target prefix purely by insertions.
    for j in range(1, n_B + 1):
        table[0][j] = j
    # First column: erase the source prefix purely by deletions.
    for i in range(1, n_A + 1):
        table[i][0] = i
    for j in range(1, n_B + 1):
        for i in range(1, n_A + 1):
            if lettersA[i - 1] == lettersB[j - 1]:
                table[i][j] = table[i - 1][j - 1]
            else:
                # Cheapest of deletion, insertion, substitution.
                table[i][j] = 1 + min(table[i - 1][j],
                                      table[i][j - 1],
                                      table[i - 1][j - 1])
    return table[-1][-1]
constant[" Implements Daegmar-Levenshtein edit distance algorithm:
Ref: https://en.wikipedia.org/wiki/Edit_distance
Ref: https://en.wikipedia.org/wiki/Levenshtein_distance]
if <ast.UnaryOp object at 0x7da1b06256c0> begin[:]
variable[lettersA] assign[=] call[name[tamil].utf8.get_letters, parameter[name[wordA]]]
if <ast.UnaryOp object at 0x7da1b06276d0> begin[:]
variable[lettersB] assign[=] call[name[tamil].utf8.get_letters, parameter[name[wordB]]]
variable[n_A] assign[=] call[name[len], parameter[name[lettersA]]]
variable[n_B] assign[=] call[name[len], parameter[name[lettersB]]]
variable[dist_AB] assign[=] <ast.ListComp object at 0x7da1b0627b20>
for taget[name[j]] in starred[call[name[range], parameter[constant[1], binary_operation[name[n_B] + constant[1]]]]] begin[:]
call[call[name[dist_AB]][constant[0]]][name[j]] assign[=] name[j]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[name[n_A] + constant[1]]]]] begin[:]
call[call[name[dist_AB]][name[i]]][constant[0]] assign[=] name[i]
for taget[name[j]] in starred[call[name[range], parameter[constant[1], binary_operation[name[n_B] + constant[1]]]]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[name[n_A] + constant[1]]]]] begin[:]
if compare[call[name[lettersA]][binary_operation[name[i] - constant[1]]] equal[==] call[name[lettersB]][binary_operation[name[j] - constant[1]]]] begin[:]
variable[new_dist] assign[=] call[call[name[dist_AB]][binary_operation[name[i] - constant[1]]]][binary_operation[name[j] - constant[1]]]
call[call[name[dist_AB]][name[i]]][name[j]] assign[=] name[new_dist]
return[call[call[name[dist_AB]][<ast.UnaryOp object at 0x7da18f722710>]][<ast.UnaryOp object at 0x7da18f720700>]] | keyword[def] identifier[edit_distance] ( identifier[wordA] , identifier[wordB] ):
literal[string]
keyword[if] keyword[not] identifier[type] ( identifier[wordA] ) keyword[is] identifier[list] :
identifier[lettersA] = identifier[tamil] . identifier[utf8] . identifier[get_letters] ( identifier[wordA] )
keyword[else] :
identifier[lettersA] = identifier[wordA]
keyword[if] keyword[not] identifier[type] ( identifier[wordB] ) keyword[is] identifier[list] :
identifier[lettersB] = identifier[tamil] . identifier[utf8] . identifier[get_letters] ( identifier[wordB] )
keyword[else] :
identifier[lettersB] = identifier[wordB]
identifier[n_A] = identifier[len] ( identifier[lettersA] )
identifier[n_B] = identifier[len] ( identifier[lettersB] )
identifier[dist_AB] =[[ literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n_B] + literal[int] )] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ,( identifier[n_A] + literal[int] ))]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[n_B] + literal[int] ):
identifier[dist_AB] [ literal[int] ][ identifier[j] ]= identifier[j]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n_A] + literal[int] ):
identifier[dist_AB] [ identifier[i] ][ literal[int] ]= identifier[i]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[n_B] + literal[int] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n_A] + literal[int] ):
keyword[if] ( identifier[lettersA] [ identifier[i] - literal[int] ]== identifier[lettersB] [ identifier[j] - literal[int] ]):
identifier[new_dist] = identifier[dist_AB] [ identifier[i] - literal[int] ][ identifier[j] - literal[int] ]
keyword[else] :
identifier[new_dist] = identifier[min] ([ identifier[dist_AB] [ identifier[i] - literal[int] ][ identifier[j] ]+ literal[int] , identifier[dist_AB] [ identifier[i] ][ identifier[j] - literal[int] ]+ literal[int] , identifier[dist_AB] [ identifier[i] - literal[int] ][ identifier[j] - literal[int] ]+ literal[int] ])
identifier[dist_AB] [ identifier[i] ][ identifier[j] ]= identifier[new_dist]
keyword[return] identifier[dist_AB] [- literal[int] ][- literal[int] ] | def edit_distance(wordA, wordB):
"""" Implements Daegmar-Levenshtein edit distance algorithm:
Ref: https://en.wikipedia.org/wiki/Edit_distance
Ref: https://en.wikipedia.org/wiki/Levenshtein_distance"""
if not type(wordA) is list:
lettersA = tamil.utf8.get_letters(wordA) # depends on [control=['if'], data=[]]
else:
lettersA = wordA
if not type(wordB) is list:
lettersB = tamil.utf8.get_letters(wordB) # depends on [control=['if'], data=[]]
else:
lettersB = wordB
n_A = len(lettersA)
n_B = len(lettersB)
dist_AB = [[0 for i in range(0, n_B + 1)] for i in range(0, n_A + 1)]
# Target prefix reached by insertion
for j in range(1, n_B + 1):
dist_AB[0][j] = j # depends on [control=['for'], data=['j']]
for i in range(1, n_A + 1):
dist_AB[i][0] = i # depends on [control=['for'], data=['i']]
for j in range(1, n_B + 1):
for i in range(1, n_A + 1):
if lettersA[i - 1] == lettersB[j - 1]:
new_dist = dist_AB[i - 1][j - 1] # depends on [control=['if'], data=[]]
else:
new_dist = min([dist_AB[i - 1][j] + 1, dist_AB[i][j - 1] + 1, dist_AB[i - 1][j - 1] + 1]) #del, ins, or sub
dist_AB[i][j] = new_dist # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['j']]
return dist_AB[-1][-1] |
def add(self, channel_name, entry):
    """ add name to channels database and check if it contains a source
    path

    Parameters
    ----------
    channel_name : str
        name that needs to be added to the database
    entry : tuple
        (group index, channel index) pair

    """
    if not channel_name:
        return
    # Index the fully qualified name first.
    if channel_name in self:
        self[channel_name].append(entry)
    else:
        self[channel_name] = [entry]
    # A backslash separates "<name>\<source path>"; also index the bare
    # channel name so lookups without the source path succeed.
    if "\\" in channel_name:
        bare_name = channel_name.split("\\")[0]
        if bare_name in self:
            self[bare_name].append(entry)
        else:
            self[bare_name] = [entry]
self[channel_name].append(entry) | def function[add, parameter[self, channel_name, entry]]:
constant[ add name to channels database and check if it contains a source
path
Parameters
----------
channel_name : str
name that needs to be added to the database
entry : tuple
(group index, channel index) pair
]
if name[channel_name] begin[:]
if compare[name[channel_name] <ast.NotIn object at 0x7da2590d7190> name[self]] begin[:]
call[name[self]][name[channel_name]] assign[=] list[[<ast.Name object at 0x7da18f811ea0>]]
if compare[constant[\] in name[channel_name]] begin[:]
variable[channel_name] assign[=] call[call[name[channel_name].split, parameter[constant[\]]]][constant[0]]
if compare[name[channel_name] <ast.NotIn object at 0x7da2590d7190> name[self]] begin[:]
call[name[self]][name[channel_name]] assign[=] list[[<ast.Name object at 0x7da1b1712050>]] | keyword[def] identifier[add] ( identifier[self] , identifier[channel_name] , identifier[entry] ):
literal[string]
keyword[if] identifier[channel_name] :
keyword[if] identifier[channel_name] keyword[not] keyword[in] identifier[self] :
identifier[self] [ identifier[channel_name] ]=[ identifier[entry] ]
keyword[else] :
identifier[self] [ identifier[channel_name] ]. identifier[append] ( identifier[entry] )
keyword[if] literal[string] keyword[in] identifier[channel_name] :
identifier[channel_name] = identifier[channel_name] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[channel_name] keyword[not] keyword[in] identifier[self] :
identifier[self] [ identifier[channel_name] ]=[ identifier[entry] ]
keyword[else] :
identifier[self] [ identifier[channel_name] ]. identifier[append] ( identifier[entry] ) | def add(self, channel_name, entry):
""" add name to channels database and check if it contains a source
path
Parameters
----------
channel_name : str
name that needs to be added to the database
entry : tuple
(group index, channel index) pair
"""
if channel_name:
if channel_name not in self:
self[channel_name] = [entry] # depends on [control=['if'], data=['channel_name', 'self']]
else:
self[channel_name].append(entry)
if '\\' in channel_name:
channel_name = channel_name.split('\\')[0]
if channel_name not in self:
self[channel_name] = [entry] # depends on [control=['if'], data=['channel_name', 'self']]
else:
self[channel_name].append(entry) # depends on [control=['if'], data=['channel_name']] # depends on [control=['if'], data=[]] |
def stop(self):
    """ Stop all functions running in the thread handler."""
    # Signal every worker loop to exit first, then wait for each thread
    # to actually terminate.
    for event in self.run_events:
        event.clear()
    for worker in self.thread_pool:
        worker.join()
constant[ Stop all functions running in the thread handler.]
for taget[name[run_event]] in starred[name[self].run_events] begin[:]
call[name[run_event].clear, parameter[]]
for taget[name[thread]] in starred[name[self].thread_pool] begin[:]
call[name[thread].join, parameter[]] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
keyword[for] identifier[run_event] keyword[in] identifier[self] . identifier[run_events] :
identifier[run_event] . identifier[clear] ()
keyword[for] identifier[thread] keyword[in] identifier[self] . identifier[thread_pool] :
identifier[thread] . identifier[join] () | def stop(self):
""" Stop all functions running in the thread handler."""
for run_event in self.run_events:
run_event.clear() # depends on [control=['for'], data=['run_event']]
for thread in self.thread_pool:
thread.join() # depends on [control=['for'], data=['thread']] |
def _extract_jump_targets(stmt):
    """
    Extract goto targets from a Jump or a ConditionalJump statement.

    :param stmt: The statement to analyze.
    :return: A list of known concrete jump targets.
    :rtype: list
    """
    # FIXME: We are assuming all jump targets are concrete targets. They may not be.
    if isinstance(stmt, ailment.Stmt.Jump):
        return [stmt.target.value]
    if isinstance(stmt, ailment.Stmt.ConditionalJump):
        return [stmt.true_target.value, stmt.false_target.value]
    # Any other statement kind contributes no jump targets.
    return []
constant[
Extract goto targets from a Jump or a ConditionalJump statement.
:param stmt: The statement to analyze.
:return: A list of known concrete jump targets.
:rtype: list
]
variable[targets] assign[=] list[[]]
if call[name[isinstance], parameter[name[stmt], name[ailment].Stmt.Jump]] begin[:]
call[name[targets].append, parameter[name[stmt].target.value]]
return[name[targets]] | keyword[def] identifier[_extract_jump_targets] ( identifier[stmt] ):
literal[string]
identifier[targets] =[]
keyword[if] identifier[isinstance] ( identifier[stmt] , identifier[ailment] . identifier[Stmt] . identifier[Jump] ):
identifier[targets] . identifier[append] ( identifier[stmt] . identifier[target] . identifier[value] )
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[ailment] . identifier[Stmt] . identifier[ConditionalJump] ):
identifier[targets] . identifier[append] ( identifier[stmt] . identifier[true_target] . identifier[value] )
identifier[targets] . identifier[append] ( identifier[stmt] . identifier[false_target] . identifier[value] )
keyword[return] identifier[targets] | def _extract_jump_targets(stmt):
"""
Extract goto targets from a Jump or a ConditionalJump statement.
:param stmt: The statement to analyze.
:return: A list of known concrete jump targets.
:rtype: list
"""
targets = []
# FIXME: We are assuming all jump targets are concrete targets. They may not be.
if isinstance(stmt, ailment.Stmt.Jump):
targets.append(stmt.target.value) # depends on [control=['if'], data=[]]
elif isinstance(stmt, ailment.Stmt.ConditionalJump):
targets.append(stmt.true_target.value)
targets.append(stmt.false_target.value) # depends on [control=['if'], data=[]]
return targets |
def _mom(self, keys, left, right, cache):
    """
    Statistical moments.
    Example:
        >>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
        [1. 0.5 0.3333 0.25 ]
        >>> print(numpy.around(chaospy.Add(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
        [ 1. 2.5 6.3333 16.25 ]
        >>> print(numpy.around(chaospy.Add(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
        [ 1. 2.5 6.3333 16.25 ]
        >>> print(numpy.around(chaospy.Add(1, 1).mom([0, 1, 2, 3]), 4))
        [1. 2. 4. 8.]
    """
    # A sum of *dependent* distributions cannot be split into products of
    # per-operand moments below, so refuse it up front.
    if evaluation.get_dependencies(left, right):
        raise evaluation.DependencyError(
            "sum of dependent distributions not feasible: "
            "{} and {}".format(left, right)
        )
    # Enumerate every multi-index 0 <= key <= keys; after the reshape each
    # column of keys_ is one such multi-index.
    keys_ = numpy.mgrid[tuple(slice(0, key+1, 1) for key in keys)]
    keys_ = keys_.reshape(len(self), -1)
    # Per-index moments of each operand: query the distribution, or take
    # plain powers when the operand is a numeric constant.
    if isinstance(left, Dist):
        left = [
            evaluation.evaluate_moment(left, key, cache=cache)
            for key in keys_.T
        ]
    else:
        # NOTE(review): the reversed() ordering is meant to line constant
        # powers up with the keys_ columns — confirm against the
        # evaluate_moment ordering above.
        left = list(reversed(numpy.array(left).T**keys_.T))
    if isinstance(right, Dist):
        right = [
            evaluation.evaluate_moment(right, key, cache=cache)
            for key in keys_.T
        ]
    else:
        right = list(reversed(numpy.array(right).T**keys_.T))
    # Combine the per-index moments with binomial coefficients; the
    # (key <= keys.T) mask zeroes out indices exceeding the target order.
    # NOTE(review): the pairing of left[idx] with right[idx] relies on the
    # operand-moment conventions above — verify before restructuring.
    out = numpy.zeros(keys.shape)
    for idx in range(keys_.shape[1]):
        key = keys_.T[idx]
        coef = comb(keys.T, key)
        out += coef*left[idx]*right[idx]*(key <= keys.T)
    if len(self) > 1:
        # Multivariate case: the moment factorizes across dimensions.
        out = numpy.prod(out, 1)
    return out
constant[
Statistical moments.
Example:
>>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
[1. 0.5 0.3333 0.25 ]
>>> print(numpy.around(chaospy.Add(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(1, 1).mom([0, 1, 2, 3]), 4))
[1. 2. 4. 8.]
]
if call[name[evaluation].get_dependencies, parameter[name[left], name[right]]] begin[:]
<ast.Raise object at 0x7da20e9b3c10>
variable[keys_] assign[=] call[name[numpy].mgrid][call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20e9b0160>]]]
variable[keys_] assign[=] call[name[keys_].reshape, parameter[call[name[len], parameter[name[self]]], <ast.UnaryOp object at 0x7da20e9b3790>]]
if call[name[isinstance], parameter[name[left], name[Dist]]] begin[:]
variable[left] assign[=] <ast.ListComp object at 0x7da20e9b29e0>
if call[name[isinstance], parameter[name[right], name[Dist]]] begin[:]
variable[right] assign[=] <ast.ListComp object at 0x7da20e9b3d00>
variable[out] assign[=] call[name[numpy].zeros, parameter[name[keys].shape]]
for taget[name[idx]] in starred[call[name[range], parameter[call[name[keys_].shape][constant[1]]]]] begin[:]
variable[key] assign[=] call[name[keys_].T][name[idx]]
variable[coef] assign[=] call[name[comb], parameter[name[keys].T, name[key]]]
<ast.AugAssign object at 0x7da20c6e4940>
if compare[call[name[len], parameter[name[self]]] greater[>] constant[1]] begin[:]
variable[out] assign[=] call[name[numpy].prod, parameter[name[out], constant[1]]]
return[name[out]] | keyword[def] identifier[_mom] ( identifier[self] , identifier[keys] , identifier[left] , identifier[right] , identifier[cache] ):
literal[string]
keyword[if] identifier[evaluation] . identifier[get_dependencies] ( identifier[left] , identifier[right] ):
keyword[raise] identifier[evaluation] . identifier[DependencyError] (
literal[string]
literal[string] . identifier[format] ( identifier[left] , identifier[right] )
)
identifier[keys_] = identifier[numpy] . identifier[mgrid] [ identifier[tuple] ( identifier[slice] ( literal[int] , identifier[key] + literal[int] , literal[int] ) keyword[for] identifier[key] keyword[in] identifier[keys] )]
identifier[keys_] = identifier[keys_] . identifier[reshape] ( identifier[len] ( identifier[self] ),- literal[int] )
keyword[if] identifier[isinstance] ( identifier[left] , identifier[Dist] ):
identifier[left] =[
identifier[evaluation] . identifier[evaluate_moment] ( identifier[left] , identifier[key] , identifier[cache] = identifier[cache] )
keyword[for] identifier[key] keyword[in] identifier[keys_] . identifier[T]
]
keyword[else] :
identifier[left] = identifier[list] ( identifier[reversed] ( identifier[numpy] . identifier[array] ( identifier[left] ). identifier[T] ** identifier[keys_] . identifier[T] ))
keyword[if] identifier[isinstance] ( identifier[right] , identifier[Dist] ):
identifier[right] =[
identifier[evaluation] . identifier[evaluate_moment] ( identifier[right] , identifier[key] , identifier[cache] = identifier[cache] )
keyword[for] identifier[key] keyword[in] identifier[keys_] . identifier[T]
]
keyword[else] :
identifier[right] = identifier[list] ( identifier[reversed] ( identifier[numpy] . identifier[array] ( identifier[right] ). identifier[T] ** identifier[keys_] . identifier[T] ))
identifier[out] = identifier[numpy] . identifier[zeros] ( identifier[keys] . identifier[shape] )
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[keys_] . identifier[shape] [ literal[int] ]):
identifier[key] = identifier[keys_] . identifier[T] [ identifier[idx] ]
identifier[coef] = identifier[comb] ( identifier[keys] . identifier[T] , identifier[key] )
identifier[out] += identifier[coef] * identifier[left] [ identifier[idx] ]* identifier[right] [ identifier[idx] ]*( identifier[key] <= identifier[keys] . identifier[T] )
keyword[if] identifier[len] ( identifier[self] )> literal[int] :
identifier[out] = identifier[numpy] . identifier[prod] ( identifier[out] , literal[int] )
keyword[return] identifier[out] | def _mom(self, keys, left, right, cache):
"""
Statistical moments.
Example:
>>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
[1. 0.5 0.3333 0.25 ]
>>> print(numpy.around(chaospy.Add(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
[ 1. 2.5 6.3333 16.25 ]
>>> print(numpy.around(chaospy.Add(1, 1).mom([0, 1, 2, 3]), 4))
[1. 2. 4. 8.]
"""
if evaluation.get_dependencies(left, right):
raise evaluation.DependencyError('sum of dependent distributions not feasible: {} and {}'.format(left, right)) # depends on [control=['if'], data=[]]
keys_ = numpy.mgrid[tuple((slice(0, key + 1, 1) for key in keys))]
keys_ = keys_.reshape(len(self), -1)
if isinstance(left, Dist):
left = [evaluation.evaluate_moment(left, key, cache=cache) for key in keys_.T] # depends on [control=['if'], data=[]]
else:
left = list(reversed(numpy.array(left).T ** keys_.T))
if isinstance(right, Dist):
right = [evaluation.evaluate_moment(right, key, cache=cache) for key in keys_.T] # depends on [control=['if'], data=[]]
else:
right = list(reversed(numpy.array(right).T ** keys_.T))
out = numpy.zeros(keys.shape)
for idx in range(keys_.shape[1]):
key = keys_.T[idx]
coef = comb(keys.T, key)
out += coef * left[idx] * right[idx] * (key <= keys.T) # depends on [control=['for'], data=['idx']]
if len(self) > 1:
out = numpy.prod(out, 1) # depends on [control=['if'], data=[]]
return out |
def date_time_this_year(
        self,
        before_now=True,
        after_now=False,
        tzinfo=None):
    """
    Gets a DateTime object for the current year.

    :param before_now: include days in current year before today
    :param after_now: include days in current year after today
    :param tzinfo: timezone, instance of datetime.tzinfo subclass
    :example DateTime('2012-04-04 11:02:02')
    :return DateTime
    """
    now = datetime.now(tzinfo)
    this_year_start = now.replace(
        month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
    next_year_start = datetime(now.year + 1, 1, 1, tzinfo=tzinfo)

    # Choose the sampling window from which sides of "now" are allowed.
    if before_now and after_now:
        return self.date_time_between_dates(
            this_year_start, next_year_start, tzinfo)
    if after_now:
        return self.date_time_between_dates(now, next_year_start, tzinfo)
    if before_now:
        return self.date_time_between_dates(this_year_start, now, tzinfo)
    # Neither side allowed: the only admissible instant is "now" itself.
    return now
constant[
Gets a DateTime object for the current year.
:param before_now: include days in current year before today
:param after_now: include days in current year after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
]
variable[now] assign[=] call[name[datetime].now, parameter[name[tzinfo]]]
variable[this_year_start] assign[=] call[name[now].replace, parameter[]]
variable[next_year_start] assign[=] call[name[datetime], parameter[binary_operation[name[now].year + constant[1]], constant[1], constant[1]]]
if <ast.BoolOp object at 0x7da207f98b20> begin[:]
return[call[name[self].date_time_between_dates, parameter[name[this_year_start], name[next_year_start], name[tzinfo]]]] | keyword[def] identifier[date_time_this_year] (
identifier[self] ,
identifier[before_now] = keyword[True] ,
identifier[after_now] = keyword[False] ,
identifier[tzinfo] = keyword[None] ):
literal[string]
identifier[now] = identifier[datetime] . identifier[now] ( identifier[tzinfo] )
identifier[this_year_start] = identifier[now] . identifier[replace] (
identifier[month] = literal[int] , identifier[day] = literal[int] , identifier[hour] = literal[int] , identifier[minute] = literal[int] , identifier[second] = literal[int] , identifier[microsecond] = literal[int] )
identifier[next_year_start] = identifier[datetime] ( identifier[now] . identifier[year] + literal[int] , literal[int] , literal[int] , identifier[tzinfo] = identifier[tzinfo] )
keyword[if] identifier[before_now] keyword[and] identifier[after_now] :
keyword[return] identifier[self] . identifier[date_time_between_dates] (
identifier[this_year_start] , identifier[next_year_start] , identifier[tzinfo] )
keyword[elif] keyword[not] identifier[before_now] keyword[and] identifier[after_now] :
keyword[return] identifier[self] . identifier[date_time_between_dates] ( identifier[now] , identifier[next_year_start] , identifier[tzinfo] )
keyword[elif] keyword[not] identifier[after_now] keyword[and] identifier[before_now] :
keyword[return] identifier[self] . identifier[date_time_between_dates] ( identifier[this_year_start] , identifier[now] , identifier[tzinfo] )
keyword[else] :
keyword[return] identifier[now] | def date_time_this_year(self, before_now=True, after_now=False, tzinfo=None):
"""
Gets a DateTime object for the current year.
:param before_now: include days in current year before today
:param after_now: include days in current year after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
"""
now = datetime.now(tzinfo)
this_year_start = now.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
next_year_start = datetime(now.year + 1, 1, 1, tzinfo=tzinfo)
if before_now and after_now:
return self.date_time_between_dates(this_year_start, next_year_start, tzinfo) # depends on [control=['if'], data=[]]
elif not before_now and after_now:
return self.date_time_between_dates(now, next_year_start, tzinfo) # depends on [control=['if'], data=[]]
elif not after_now and before_now:
return self.date_time_between_dates(this_year_start, now, tzinfo) # depends on [control=['if'], data=[]]
else:
return now |
def _revert_categories(self):
    """
    Inplace conversion to categories.
    """
    # Restore the remembered categorical dtype for every tracked column
    # that is still present; vanished columns are silently skipped.
    for column in self._categories:
        if column in self.columns:
            self[column] = self[column].astype(self._categories[column])
constant[
Inplace conversion to categories.
]
for taget[tuple[[<ast.Name object at 0x7da20c991c00>, <ast.Name object at 0x7da20c991210>]]] in starred[call[name[self]._categories.items, parameter[]]] begin[:]
if compare[name[column] in name[self].columns] begin[:]
call[name[self]][name[column]] assign[=] call[call[name[self]][name[column]].astype, parameter[name[dtype]]] | keyword[def] identifier[_revert_categories] ( identifier[self] ):
literal[string]
keyword[for] identifier[column] , identifier[dtype] keyword[in] identifier[self] . identifier[_categories] . identifier[items] ():
keyword[if] identifier[column] keyword[in] identifier[self] . identifier[columns] :
identifier[self] [ identifier[column] ]= identifier[self] [ identifier[column] ]. identifier[astype] ( identifier[dtype] ) | def _revert_categories(self):
"""
Inplace conversion to categories.
"""
for (column, dtype) in self._categories.items():
if column in self.columns:
self[column] = self[column].astype(dtype) # depends on [control=['if'], data=['column']] # depends on [control=['for'], data=[]] |
def _call(self, x, out=None):
    """Linearly combine ``x`` and write to ``out`` if given."""
    # Allocate a fresh element of the range when no output is supplied.
    target = out if out is not None else self.range.element()
    # target <- a * x[0] + b * x[1]
    target.lincomb(self.a, x[0], self.b, x[1])
    return target
constant[Linearly combine ``x`` and write to ``out`` if given.]
if compare[name[out] is constant[None]] begin[:]
variable[out] assign[=] call[name[self].range.element, parameter[]]
call[name[out].lincomb, parameter[name[self].a, call[name[x]][constant[0]], name[self].b, call[name[x]][constant[1]]]]
return[name[out]] | keyword[def] identifier[_call] ( identifier[self] , identifier[x] , identifier[out] = keyword[None] ):
literal[string]
keyword[if] identifier[out] keyword[is] keyword[None] :
identifier[out] = identifier[self] . identifier[range] . identifier[element] ()
identifier[out] . identifier[lincomb] ( identifier[self] . identifier[a] , identifier[x] [ literal[int] ], identifier[self] . identifier[b] , identifier[x] [ literal[int] ])
keyword[return] identifier[out] | def _call(self, x, out=None):
"""Linearly combine ``x`` and write to ``out`` if given."""
if out is None:
out = self.range.element() # depends on [control=['if'], data=['out']]
out.lincomb(self.a, x[0], self.b, x[1])
return out |
def do_python(self, arg):
    """
    # - spawn a python interpreter
    python - spawn a python interpreter
    # <statement> - execute a single python statement
    python <statement> - execute a single python statement
    """
    if self.cmdprefix:
        raise CmdError("prefix not allowed")
    if not arg:
        # No statement given: drop into an interactive Python shell.
        try:
            self._spawn_python_shell(arg)
        except Exception:
            e = sys.exc_info()[1]
            raise CmdError(
                "unhandled exception when running Python console: %s" % e)
    else:
        # A statement was given: execute it directly, reporting but not
        # propagating any error it raises.
        try:
            compat.exec_(arg, globals(), locals())
        except Exception:
            traceback.print_exc()
constant[
# - spawn a python interpreter
python - spawn a python interpreter
# <statement> - execute a single python statement
python <statement> - execute a single python statement
]
if name[self].cmdprefix begin[:]
<ast.Raise object at 0x7da18fe91450>
if name[arg] begin[:]
<ast.Try object at 0x7da18fe93df0> | keyword[def] identifier[do_python] ( identifier[self] , identifier[arg] ):
literal[string]
keyword[if] identifier[self] . identifier[cmdprefix] :
keyword[raise] identifier[CmdError] ( literal[string] )
keyword[if] identifier[arg] :
keyword[try] :
identifier[compat] . identifier[exec_] ( identifier[arg] , identifier[globals] (), identifier[locals] ())
keyword[except] identifier[Exception] :
identifier[traceback] . identifier[print_exc] ()
keyword[else] :
keyword[try] :
identifier[self] . identifier[_spawn_python_shell] ( identifier[arg] )
keyword[except] identifier[Exception] :
identifier[e] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
keyword[raise] identifier[CmdError] (
literal[string] % identifier[e] ) | def do_python(self, arg):
"""
# - spawn a python interpreter
python - spawn a python interpreter
# <statement> - execute a single python statement
python <statement> - execute a single python statement
"""
if self.cmdprefix:
raise CmdError('prefix not allowed') # depends on [control=['if'], data=[]]
# When given a Python statement, execute it directly.
if arg:
try:
compat.exec_(arg, globals(), locals()) # depends on [control=['try'], data=[]]
except Exception:
traceback.print_exc() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
# When no statement is given, spawn a Python interpreter.
try:
self._spawn_python_shell(arg) # depends on [control=['try'], data=[]]
except Exception:
e = sys.exc_info()[1]
raise CmdError('unhandled exception when running Python console: %s' % e) # depends on [control=['except'], data=[]] |
def check_direct_mode_cluster_definition(cluster, config_path):
    '''
    Check the cluster definition for direct mode
    :param cluster:
    :param config_path:
    :return:
    '''
    # The cluster is defined for direct mode iff its resolved
    # configuration directory exists on disk.
    conf_dir = config.get_heron_cluster_conf_dir(cluster, config_path)
    return os.path.isdir(conf_dir)
constant[
Check the cluster definition for direct mode
:param cluster:
:param config_path:
:return:
]
variable[config_path] assign[=] call[name[config].get_heron_cluster_conf_dir, parameter[name[cluster], name[config_path]]]
if <ast.UnaryOp object at 0x7da20cabfd60> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[check_direct_mode_cluster_definition] ( identifier[cluster] , identifier[config_path] ):
literal[string]
identifier[config_path] = identifier[config] . identifier[get_heron_cluster_conf_dir] ( identifier[cluster] , identifier[config_path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[config_path] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def check_direct_mode_cluster_definition(cluster, config_path):
"""
Check the cluster definition for direct mode
:param cluster:
:param config_path:
:return:
"""
config_path = config.get_heron_cluster_conf_dir(cluster, config_path)
if not os.path.isdir(config_path):
return False # depends on [control=['if'], data=[]]
return True |
def end_table_last_footer(self):
    r"""End the table foot which will appear on the last page."""
    # A table may only declare one last-page footer.
    if self.lastFoot:
        raise TableError("Table already has a last foot")
    self.lastFoot = True
    self.append(Command('endlastfoot'))
constant[End the table foot which will appear on the last page.]
if name[self].lastFoot begin[:]
variable[msg] assign[=] constant[Table already has a last foot]
<ast.Raise object at 0x7da18bccb2e0>
name[self].lastFoot assign[=] constant[True]
call[name[self].append, parameter[call[name[Command], parameter[constant[endlastfoot]]]]] | keyword[def] identifier[end_table_last_footer] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[lastFoot] :
identifier[msg] = literal[string]
keyword[raise] identifier[TableError] ( identifier[msg] )
identifier[self] . identifier[lastFoot] = keyword[True]
identifier[self] . identifier[append] ( identifier[Command] ( literal[string] )) | def end_table_last_footer(self):
"""End the table foot which will appear on the last page."""
if self.lastFoot:
msg = 'Table already has a last foot'
raise TableError(msg) # depends on [control=['if'], data=[]]
self.lastFoot = True
self.append(Command('endlastfoot')) |
def clearOverlayTexture(self, ulOverlayHandle):
    """Use this to tell the overlay system to release the texture set for this overlay."""
    # Thin pass-through to the native function table entry.
    return self.function_table.clearOverlayTexture(ulOverlayHandle)
constant[Use this to tell the overlay system to release the texture set for this overlay.]
variable[fn] assign[=] name[self].function_table.clearOverlayTexture
variable[result] assign[=] call[name[fn], parameter[name[ulOverlayHandle]]]
return[name[result]] | keyword[def] identifier[clearOverlayTexture] ( identifier[self] , identifier[ulOverlayHandle] ):
literal[string]
identifier[fn] = identifier[self] . identifier[function_table] . identifier[clearOverlayTexture]
identifier[result] = identifier[fn] ( identifier[ulOverlayHandle] )
keyword[return] identifier[result] | def clearOverlayTexture(self, ulOverlayHandle):
"""Use this to tell the overlay system to release the texture set for this overlay."""
fn = self.function_table.clearOverlayTexture
result = fn(ulOverlayHandle)
return result |
def get_recent_pages(self, namespaces, rccontinue=''):
    """Retrieve recent pages from all namespaces starting from rccontinue.

    Args:
        namespaces: iterable of namespace identifiers (strings); they are
            joined in sorted order into the ``rcnamespace`` parameter.
        rccontinue: continuation token from a previous API response;
            omitted from the request when empty.

    Returns:
        Whatever ``self.call`` returns for the built request
        (the decoded ``recentchanges`` API response).
    """
    params = {
        "action": "query",
        "list": "recentchanges",
        "rclimit": self.limit,
        # sorted() preserves the original stable ordering without mutating
        # the caller's list (the old code sorted the argument in place).
        "rcnamespace": "|".join(sorted(namespaces)),
        "rcprop": "title|timestamp|ids",
        "format": "json"
    }
    if rccontinue:
        params['rccontinue'] = rccontinue
    return self.call(params)
constant[Retrieve recent pages from all namespaces starting from rccontinue.]
call[name[namespaces].sort, parameter[]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b020d990>, <ast.Constant object at 0x7da1b020de40>, <ast.Constant object at 0x7da1b020d810>, <ast.Constant object at 0x7da1b020f940>, <ast.Constant object at 0x7da1b020e530>, <ast.Constant object at 0x7da1b020dcf0>], [<ast.Constant object at 0x7da1b020f820>, <ast.Constant object at 0x7da1b020ddb0>, <ast.Attribute object at 0x7da1b020fb50>, <ast.Call object at 0x7da1b0353400>, <ast.Constant object at 0x7da1b0381b10>, <ast.Constant object at 0x7da1b0380a60>]]
if name[rccontinue] begin[:]
call[name[params]][constant[rccontinue]] assign[=] name[rccontinue]
return[call[name[self].call, parameter[name[params]]]] | keyword[def] identifier[get_recent_pages] ( identifier[self] , identifier[namespaces] , identifier[rccontinue] = literal[string] ):
literal[string]
identifier[namespaces] . identifier[sort] ()
identifier[params] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[limit] ,
literal[string] : literal[string] . identifier[join] ( identifier[namespaces] ),
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[if] identifier[rccontinue] :
identifier[params] [ literal[string] ]= identifier[rccontinue]
keyword[return] identifier[self] . identifier[call] ( identifier[params] ) | def get_recent_pages(self, namespaces, rccontinue=''):
"""Retrieve recent pages from all namespaces starting from rccontinue."""
namespaces.sort()
params = {'action': 'query', 'list': 'recentchanges', 'rclimit': self.limit, 'rcnamespace': '|'.join(namespaces), 'rcprop': 'title|timestamp|ids', 'format': 'json'}
if rccontinue:
params['rccontinue'] = rccontinue # depends on [control=['if'], data=[]]
return self.call(params) |
def load_dict(self, data, overwrite=False, auto_load_model=True):
    """
    Load a dictionary into the model.
    Args:
        data(dict): Dictionary to load
        overwrite(bool): Whether the data present in the model should be overwritten by the
            data in the dict or not.
        auto_load_model(bool): If set to true models will be loaded as they are needed
    Examples:
        >>> vlans_dict = {
        >>>     "vlans": { "vlan": { 100: {
        >>>                     "config": {
        >>>                         "vlan_id": 100, "name": "production"}},
        >>>                 200: {
        >>>                     "config": {
        >>>                         "vlan_id": 200, "name": "dev"}}}}}
        >>> config.load_dict(vlans_dict)
        >>> print(config.vlans.vlan.keys())
        ... [200, 100]
        >>> print(100, config.vlans.vlan[100].config.name)
        ... (100, u'production')
        >>> print(200, config.vlans.vlan[200].config.name)
        ... (200, u'dev')
    """
    # NOTE(review): `overwrite` is accepted for API compatibility but is not
    # consulted in this body; merge behavior is decided by _load_dict — confirm.
    for k, v in data.items():
        if k not in self._elements:
            # Unknown top-level model: load it on demand or fail fast.
            if not auto_load_model:
                raise AttributeError("Model {} is not loaded".format(k))
            self._load_model(k)
        _load_dict(getattr(self, k), v)
constant[
Load a dictionary into the model.
Args:
data(dict): Dictionary to load
overwrite(bool): Whether the data present in the model should be overwritten by the
data in the dict or not.
auto_load_model(bool): If set to true models will be loaded as they are needed
Examples:
>>> vlans_dict = {
>>> "vlans": { "vlan": { 100: {
>>> "config": {
>>> "vlan_id": 100, "name": "production"}},
>>> 200: {
>>> "config": {
>>> "vlan_id": 200, "name": "dev"}}}}}
>>> config.load_dict(vlans_dict)
>>> print(config.vlans.vlan.keys())
... [200, 100]
>>> print(100, config.vlans.vlan[100].config.name)
... (100, u'production')
>>> print(200, config.vlans.vlan[200].config.name)
... (200, u'dev')
]
for taget[tuple[[<ast.Name object at 0x7da1b02855d0>, <ast.Name object at 0x7da1b0284370>]]] in starred[call[name[data].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0285000> begin[:]
<ast.Raise object at 0x7da1b0287670>
variable[attr] assign[=] call[name[getattr], parameter[name[self], name[k]]]
call[name[_load_dict], parameter[name[attr], name[v]]] | keyword[def] identifier[load_dict] ( identifier[self] , identifier[data] , identifier[overwrite] = keyword[False] , identifier[auto_load_model] = keyword[True] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] . identifier[items] ():
keyword[if] identifier[k] keyword[not] keyword[in] identifier[self] . identifier[_elements] . identifier[keys] () keyword[and] keyword[not] identifier[auto_load_model] :
keyword[raise] identifier[AttributeError] ( literal[string] . identifier[format] ( identifier[k] ))
keyword[elif] identifier[k] keyword[not] keyword[in] identifier[self] . identifier[_elements] . identifier[keys] () keyword[and] identifier[auto_load_model] :
identifier[self] . identifier[_load_model] ( identifier[k] )
identifier[attr] = identifier[getattr] ( identifier[self] , identifier[k] )
identifier[_load_dict] ( identifier[attr] , identifier[v] ) | def load_dict(self, data, overwrite=False, auto_load_model=True):
"""
Load a dictionary into the model.
Args:
data(dict): Dictionary to load
overwrite(bool): Whether the data present in the model should be overwritten by the
data in the dict or not.
auto_load_model(bool): If set to true models will be loaded as they are needed
Examples:
>>> vlans_dict = {
>>> "vlans": { "vlan": { 100: {
>>> "config": {
>>> "vlan_id": 100, "name": "production"}},
>>> 200: {
>>> "config": {
>>> "vlan_id": 200, "name": "dev"}}}}}
>>> config.load_dict(vlans_dict)
>>> print(config.vlans.vlan.keys())
... [200, 100]
>>> print(100, config.vlans.vlan[100].config.name)
... (100, u'production')
>>> print(200, config.vlans.vlan[200].config.name)
... (200, u'dev')
"""
for (k, v) in data.items():
if k not in self._elements.keys() and (not auto_load_model):
raise AttributeError('Model {} is not loaded'.format(k)) # depends on [control=['if'], data=[]]
elif k not in self._elements.keys() and auto_load_model:
self._load_model(k) # depends on [control=['if'], data=[]]
attr = getattr(self, k)
_load_dict(attr, v) # depends on [control=['for'], data=[]] |
def find_omim_type(self):
    '''
    This f(x) needs to be rehomed and shared.
    Use OMIM's description of their identifiers
    to heuristically partition them into genes | phenotypes-diseases
    type could be
    - `obsolete`  Check `omim_replaced`  populated as side effect
    - 'Suspected' (phenotype)  Ignoring thus far
    - 'gene'
    - 'Phenotype'
    - 'heritable_phenotypic_marker'  Probable phenotype
    - 'has_affected_feature'  Use as both a gene and a phenotype

    :return hash of omim_number to ontology_curie
    '''
    src_key = 'mimtitles'
    myfile = '/'.join((self.rawdir, self.files[src_key]['file']))
    # col = self.files[src_key]['columns']
    omim_type = {}
    with open(myfile, 'r') as filereader:
        reader = csv.reader(filereader, delimiter='\t')
        # todo header check
        for row in reader:
            # Guard `not row` keeps blank lines from raising IndexError
            # (the old `row[0][0]` blew up on an empty row).
            if not row or row[0].startswith('#'):  # skip comments
                continue
            if row[0] == 'Caret':  # moved|removed|split -> moved twice
                # populating a dict from an omim to a set of omims
                # here as a side effect which is less than ideal
                (prefix, omim_id, destination, _, _) = row
                omim_type[omim_id] = self.globaltt['obsolete']
                if destination[:9] == 'MOVED TO ':
                    token = destination.split(' ')
                    rep = token[2]
                    if not re.match(r'^[0-9]{6}$', rep):
                        LOG.error('Report malformed omim replacement %s', rep)
                        # clean up one I know about
                        if rep[0] == '{' and rep[7] == '}':
                            rep = rep[1:6]
                            LOG.info('cleaned up %s', rep)
                        if len(rep) == 7 and rep[6] == ',':
                            rep = rep[:5]
                            LOG.info('cleaned up %s', rep)
                    # assuming splits are typically to both gene & phenotype
                    if len(token) > 3:
                        self.omim_replaced[omim_id] = {rep, token[4]}
                    else:
                        self.omim_replaced[omim_id] = {rep}
            elif row[0] == 'Asterisk':  # declared as gene
                (prefix, omim_id, pref_label, alt_label, inc_label) = row
                omim_type[omim_id] = self.globaltt['gene']
            elif row[0] == 'NULL':
                # potential model of disease?
                (prefix, omim_id, pref_label, alt_label, inc_label) = row
                #
                omim_type[omim_id] = self.globaltt['Suspected']   # NCIT:C71458
            elif row[0] == 'Number Sign':
                (prefix, omim_id, pref_label, alt_label, inc_label) = row
                omim_type[omim_id] = self.globaltt['Phenotype']
            elif row[0] == 'Percent':
                (prefix, omim_id, pref_label, alt_label, inc_label) = row
                omim_type[omim_id] = self.globaltt['heritable_phenotypic_marker']
            elif row[0] == 'Plus':
                (prefix, omim_id, pref_label, alt_label, inc_label) = row
                # to be interpreted as a gene and/or a phenotype
                omim_type[omim_id] = self.globaltt['has_affected_feature']
            else:
                # fixed typo in the log message ('Unlnown' -> 'Unknown')
                LOG.error('Unknown OMIM type line %s', reader.line_num)
    return omim_type
constant[
This f(x) needs to be rehomed and shared.
Use OMIM's discription of their identifiers
to heuristically partition them into genes | phenotypes-diseases
type could be
- `obsolete` Check `omim_replaced` populated as side effect
- 'Suspected' (phenotype) Ignoring thus far
- 'gene'
- 'Phenotype'
- 'heritable_phenotypic_marker' Probable phenotype
- 'has_affected_feature' Use as both a gene and a phenotype
:return hash of omim_number to ontology_curie
]
variable[src_key] assign[=] constant[mimtitles]
variable[myfile] assign[=] call[constant[/].join, parameter[tuple[[<ast.Attribute object at 0x7da20e9b1570>, <ast.Subscript object at 0x7da20e9b0520>]]]]
variable[omim_type] assign[=] dictionary[[], []]
with call[name[open], parameter[name[myfile], constant[r]]] begin[:]
variable[reader] assign[=] call[name[csv].reader, parameter[name[filereader]]]
for taget[name[row]] in starred[name[reader]] begin[:]
if compare[call[call[name[row]][constant[0]]][constant[0]] equal[==] constant[#]] begin[:]
continue
return[name[omim_type]] | keyword[def] identifier[find_omim_type] ( identifier[self] ):
literal[string]
identifier[src_key] = literal[string]
identifier[myfile] = literal[string] . identifier[join] (( identifier[self] . identifier[rawdir] , identifier[self] . identifier[files] [ identifier[src_key] ][ literal[string] ]))
identifier[omim_type] ={}
keyword[with] identifier[open] ( identifier[myfile] , literal[string] ) keyword[as] identifier[filereader] :
identifier[reader] = identifier[csv] . identifier[reader] ( identifier[filereader] , identifier[delimiter] = literal[string] )
keyword[for] identifier[row] keyword[in] identifier[reader] :
keyword[if] identifier[row] [ literal[int] ][ literal[int] ]== literal[string] :
keyword[continue]
keyword[elif] identifier[row] [ literal[int] ]== literal[string] :
( identifier[prefix] , identifier[omim_id] , identifier[destination] , identifier[empty] , identifier[empty] )= identifier[row]
identifier[omim_type] [ identifier[omim_id] ]= identifier[self] . identifier[globaltt] [ literal[string] ]
keyword[if] identifier[row] [ literal[int] ][: literal[int] ]== literal[string] :
identifier[token] = identifier[row] [ literal[int] ]. identifier[split] ( literal[string] )
identifier[rep] = identifier[token] [ literal[int] ]
keyword[if] keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[rep] ):
identifier[LOG] . identifier[error] ( literal[string] , identifier[rep] )
keyword[if] identifier[rep] [ literal[int] ]== literal[string] keyword[and] identifier[rep] [ literal[int] ]== literal[string] :
identifier[rep] = identifier[rep] [ literal[int] : literal[int] ]
identifier[LOG] . identifier[info] ( literal[string] , identifier[rep] )
keyword[if] identifier[len] ( identifier[rep] )== literal[int] keyword[and] identifier[rep] [ literal[int] ]== literal[string] :
identifier[rep] = identifier[rep] [: literal[int] ]
identifier[LOG] . identifier[info] ( literal[string] , identifier[rep] )
keyword[if] identifier[len] ( identifier[token] )> literal[int] :
identifier[self] . identifier[omim_replaced] [ identifier[omim_id] ]={ identifier[rep] , identifier[token] [ literal[int] ]}
keyword[else] :
identifier[self] . identifier[omim_replaced] [ identifier[omim_id] ]={ identifier[rep] }
keyword[elif] identifier[row] [ literal[int] ]== literal[string] :
( identifier[prefix] , identifier[omim_id] , identifier[pref_label] , identifier[alt_label] , identifier[inc_label] )= identifier[row]
identifier[omim_type] [ identifier[omim_id] ]= identifier[self] . identifier[globaltt] [ literal[string] ]
keyword[elif] identifier[row] [ literal[int] ]== literal[string] :
( identifier[prefix] , identifier[omim_id] , identifier[pref_label] , identifier[alt_label] , identifier[inc_label] )= identifier[row]
identifier[omim_type] [ identifier[omim_id] ]= identifier[self] . identifier[globaltt] [ literal[string] ]
keyword[elif] identifier[row] [ literal[int] ]== literal[string] :
( identifier[prefix] , identifier[omim_id] , identifier[pref_label] , identifier[alt_label] , identifier[inc_label] )= identifier[row]
identifier[omim_type] [ identifier[omim_id] ]= identifier[self] . identifier[globaltt] [ literal[string] ]
keyword[elif] identifier[row] [ literal[int] ]== literal[string] :
( identifier[prefix] , identifier[omim_id] , identifier[pref_label] , identifier[alt_label] , identifier[inc_label] )= identifier[row]
identifier[omim_type] [ identifier[omim_id] ]= identifier[self] . identifier[globaltt] [ literal[string] ]
keyword[elif] identifier[row] [ literal[int] ]== literal[string] :
( identifier[prefix] , identifier[omim_id] , identifier[pref_label] , identifier[alt_label] , identifier[inc_label] )= identifier[row]
identifier[omim_type] [ identifier[omim_id] ]= identifier[self] . identifier[globaltt] [ literal[string] ]
keyword[else] :
identifier[LOG] . identifier[error] ( literal[string] , identifier[reader] . identifier[line_num] )
keyword[return] identifier[omim_type] | def find_omim_type(self):
"""
This f(x) needs to be rehomed and shared.
Use OMIM's discription of their identifiers
to heuristically partition them into genes | phenotypes-diseases
type could be
- `obsolete` Check `omim_replaced` populated as side effect
- 'Suspected' (phenotype) Ignoring thus far
- 'gene'
- 'Phenotype'
- 'heritable_phenotypic_marker' Probable phenotype
- 'has_affected_feature' Use as both a gene and a phenotype
:return hash of omim_number to ontology_curie
"""
src_key = 'mimtitles'
myfile = '/'.join((self.rawdir, self.files[src_key]['file']))
# col = self.files[src_key]['columns']
omim_type = {}
with open(myfile, 'r') as filereader:
reader = csv.reader(filereader, delimiter='\t')
# todo header check
for row in reader:
if row[0][0] == '#': # skip comments
continue # depends on [control=['if'], data=[]]
elif row[0] == 'Caret': # moved|removed|split -> moved twice
# populating a dict from an omim to a set of omims
# here as a side effect which is less than ideal
(prefix, omim_id, destination, empty, empty) = row
omim_type[omim_id] = self.globaltt['obsolete']
if row[2][:9] == 'MOVED TO ':
token = row[2].split(' ')
rep = token[2]
if not re.match('^[0-9]{6}$', rep):
LOG.error('Report malformed omim replacement %s', rep)
# clean up one I know about
if rep[0] == '{' and rep[7] == '}':
rep = rep[1:6]
LOG.info('cleaned up %s', rep) # depends on [control=['if'], data=[]]
if len(rep) == 7 and rep[6] == ',':
rep = rep[:5]
LOG.info('cleaned up %s', rep) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# asuming splits are typically to both gene & phenotype
if len(token) > 3:
self.omim_replaced[omim_id] = {rep, token[4]} # depends on [control=['if'], data=[]]
else:
self.omim_replaced[omim_id] = {rep} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif row[0] == 'Asterisk': # declared as gene
(prefix, omim_id, pref_label, alt_label, inc_label) = row
omim_type[omim_id] = self.globaltt['gene'] # depends on [control=['if'], data=[]]
elif row[0] == 'NULL':
# potential model of disease?
(prefix, omim_id, pref_label, alt_label, inc_label) = row
#
omim_type[omim_id] = self.globaltt['Suspected'] # NCIT:C71458 # depends on [control=['if'], data=[]]
elif row[0] == 'Number Sign':
(prefix, omim_id, pref_label, alt_label, inc_label) = row
omim_type[omim_id] = self.globaltt['Phenotype'] # depends on [control=['if'], data=[]]
elif row[0] == 'Percent':
(prefix, omim_id, pref_label, alt_label, inc_label) = row
omim_type[omim_id] = self.globaltt['heritable_phenotypic_marker'] # depends on [control=['if'], data=[]]
elif row[0] == 'Plus':
(prefix, omim_id, pref_label, alt_label, inc_label) = row
# to be interperted as a gene and/or a phenotype
omim_type[omim_id] = self.globaltt['has_affected_feature'] # depends on [control=['if'], data=[]]
else:
LOG.error('Unlnown OMIM type line %s', reader.line_num) # depends on [control=['for'], data=['row']] # depends on [control=['with'], data=['filereader']]
return omim_type |
def decimate(self, fraction=0.5, N=None, boundaries=False, verbose=True):
    """
    Downsample the number of vertices in a mesh.
    :param float fraction: the desired target of reduction.
    :param int N: the desired number of final points (**fraction** is recalculated based on it).
    :param bool boundaries: (True), decide whether to leave boundaries untouched or not.
    .. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices.
    .. hint:: |skeletonize| |skeletonize.py|_
    """
    poly = self.polydata(True)
    if N:
        # An explicit target point count overrides `fraction`.
        fraction = float(N) / poly.GetNumberOfPoints()
        if fraction >= 1:
            # The mesh already has no more than N points: nothing to remove.
            return self
    pro = vtk.vtkDecimatePro()
    pro.SetInputData(poly)
    pro.SetTargetReduction(1 - fraction)
    pro.PreserveTopologyOff()
    # Boundary vertices are either locked in place or eligible for deletion.
    if boundaries:
        pro.BoundaryVertexDeletionOff()
    else:
        pro.BoundaryVertexDeletionOn()
    pro.Update()
    if verbose:
        print("Nr. of pts, input:", poly.GetNumberOfPoints(), end="")
        print(" output:", pro.GetOutput().GetNumberOfPoints())
    return self.updateMesh(pro.GetOutput())
constant[
Downsample the number of vertices in a mesh.
:param float fraction: the desired target of reduction.
:param int N: the desired number of final points (**fraction** is recalculated based on it).
:param bool boundaries: (True), decide whether to leave boundaries untouched or not.
.. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices.
.. hint:: |skeletonize| |skeletonize.py|_
]
variable[poly] assign[=] call[name[self].polydata, parameter[constant[True]]]
if name[N] begin[:]
variable[Np] assign[=] call[name[poly].GetNumberOfPoints, parameter[]]
variable[fraction] assign[=] binary_operation[call[name[float], parameter[name[N]]] / name[Np]]
if compare[name[fraction] greater_or_equal[>=] constant[1]] begin[:]
return[name[self]]
variable[decimate] assign[=] call[name[vtk].vtkDecimatePro, parameter[]]
call[name[decimate].SetInputData, parameter[name[poly]]]
call[name[decimate].SetTargetReduction, parameter[binary_operation[constant[1] - name[fraction]]]]
call[name[decimate].PreserveTopologyOff, parameter[]]
if name[boundaries] begin[:]
call[name[decimate].BoundaryVertexDeletionOff, parameter[]]
call[name[decimate].Update, parameter[]]
if name[verbose] begin[:]
call[name[print], parameter[constant[Nr. of pts, input:], call[name[poly].GetNumberOfPoints, parameter[]]]]
call[name[print], parameter[constant[ output:], call[call[name[decimate].GetOutput, parameter[]].GetNumberOfPoints, parameter[]]]]
return[call[name[self].updateMesh, parameter[call[name[decimate].GetOutput, parameter[]]]]] | keyword[def] identifier[decimate] ( identifier[self] , identifier[fraction] = literal[int] , identifier[N] = keyword[None] , identifier[boundaries] = keyword[False] , identifier[verbose] = keyword[True] ):
literal[string]
identifier[poly] = identifier[self] . identifier[polydata] ( keyword[True] )
keyword[if] identifier[N] :
identifier[Np] = identifier[poly] . identifier[GetNumberOfPoints] ()
identifier[fraction] = identifier[float] ( identifier[N] )/ identifier[Np]
keyword[if] identifier[fraction] >= literal[int] :
keyword[return] identifier[self]
identifier[decimate] = identifier[vtk] . identifier[vtkDecimatePro] ()
identifier[decimate] . identifier[SetInputData] ( identifier[poly] )
identifier[decimate] . identifier[SetTargetReduction] ( literal[int] - identifier[fraction] )
identifier[decimate] . identifier[PreserveTopologyOff] ()
keyword[if] identifier[boundaries] :
identifier[decimate] . identifier[BoundaryVertexDeletionOff] ()
keyword[else] :
identifier[decimate] . identifier[BoundaryVertexDeletionOn] ()
identifier[decimate] . identifier[Update] ()
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] , identifier[poly] . identifier[GetNumberOfPoints] (), identifier[end] = literal[string] )
identifier[print] ( literal[string] , identifier[decimate] . identifier[GetOutput] (). identifier[GetNumberOfPoints] ())
keyword[return] identifier[self] . identifier[updateMesh] ( identifier[decimate] . identifier[GetOutput] ()) | def decimate(self, fraction=0.5, N=None, boundaries=False, verbose=True):
"""
Downsample the number of vertices in a mesh.
:param float fraction: the desired target of reduction.
:param int N: the desired number of final points (**fraction** is recalculated based on it).
:param bool boundaries: (True), decide whether to leave boundaries untouched or not.
.. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices.
.. hint:: |skeletonize| |skeletonize.py|_
"""
poly = self.polydata(True)
if N: # N = desired number of points
Np = poly.GetNumberOfPoints()
fraction = float(N) / Np
if fraction >= 1:
return self # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
decimate = vtk.vtkDecimatePro()
decimate.SetInputData(poly)
decimate.SetTargetReduction(1 - fraction)
decimate.PreserveTopologyOff()
if boundaries:
decimate.BoundaryVertexDeletionOff() # depends on [control=['if'], data=[]]
else:
decimate.BoundaryVertexDeletionOn()
decimate.Update()
if verbose:
print('Nr. of pts, input:', poly.GetNumberOfPoints(), end='')
print(' output:', decimate.GetOutput().GetNumberOfPoints()) # depends on [control=['if'], data=[]]
return self.updateMesh(decimate.GetOutput()) |
def result(self):
    """
    Returns the result of the Future, which makes the call synchronous if the result has not been computed yet.
    :return: Result of the Future.
    """
    self._reactor_check()
    # Block until the computation signals completion.
    self._event.wait()
    if self._exception:
        # Re-raise the stored exception with its original traceback.
        six.reraise(self._exception.__class__, self._exception, self._traceback)
    # NONE_RESULT is presumably a sentinel standing in for a genuine None
    # result — equality comparison kept as in the original; confirm whether
    # identity (`is`) is intended before changing it.
    return None if self._result == NONE_RESULT else self._result
constant[
Returns the result of the Future, which makes the call synchronous if the result has not been computed yet.
:return: Result of the Future.
]
call[name[self]._reactor_check, parameter[]]
call[name[self]._event.wait, parameter[]]
if name[self]._exception begin[:]
call[name[six].reraise, parameter[name[self]._exception.__class__, name[self]._exception, name[self]._traceback]]
if compare[name[self]._result equal[==] name[NONE_RESULT]] begin[:]
return[constant[None]] | keyword[def] identifier[result] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_reactor_check] ()
identifier[self] . identifier[_event] . identifier[wait] ()
keyword[if] identifier[self] . identifier[_exception] :
identifier[six] . identifier[reraise] ( identifier[self] . identifier[_exception] . identifier[__class__] , identifier[self] . identifier[_exception] , identifier[self] . identifier[_traceback] )
keyword[if] identifier[self] . identifier[_result] == identifier[NONE_RESULT] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[self] . identifier[_result] | def result(self):
"""
Returns the result of the Future, which makes the call synchronous if the result has not been computed yet.
:return: Result of the Future.
"""
self._reactor_check()
self._event.wait()
if self._exception:
six.reraise(self._exception.__class__, self._exception, self._traceback) # depends on [control=['if'], data=[]]
if self._result == NONE_RESULT:
return None # depends on [control=['if'], data=[]]
else:
return self._result |
def _get_k8s_model_attribute(model_type, field_name):
"""
Takes a model type and a Kubernetes API resource field name (such as
"serviceAccount") and returns a related attribute name (such as
"service_account") to be used with kubernetes.client.models objects. It is
impossible to prove a negative but it seems like it is always a question of
making camelCase to snake_case but by using the provided 'attribute_map' we
also ensure that the fields actually exist.
Example of V1PodSpec's attribute_map:
{
'active_deadline_seconds': 'activeDeadlineSeconds',
'affinity': 'affinity',
'automount_service_account_token': 'automountServiceAccountToken',
'containers': 'containers',
'dns_policy': 'dnsPolicy',
'host_aliases': 'hostAliases',
'host_ipc': 'hostIPC',
'host_network': 'hostNetwork',
'host_pid': 'hostPID',
'hostname': 'hostname',
'image_pull_secrets': 'imagePullSecrets',
'init_containers': 'initContainers',
'node_name': 'nodeName',
'node_selector': 'nodeSelector',
'priority': 'priority',
'priority_class_name': 'priorityClassName',
'restart_policy': 'restartPolicy',
'scheduler_name': 'schedulerName',
'security_context': 'securityContext',
'service_account': 'serviceAccount',
'service_account_name': 'serviceAccountName',
'subdomain': 'subdomain',
'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
'tolerations': 'tolerations',
'volumes': 'volumes'
}
"""
# if we get "service_account", return
if field_name in model_type.attribute_map:
return field_name
# if we get "serviceAccount", then return "service_account"
for key, value in model_type.attribute_map.items():
if value == field_name:
return key
else:
raise ValueError("'{}' did not have an attribute matching '{}'".format(model_type.__name__, field_name)) | def function[_get_k8s_model_attribute, parameter[model_type, field_name]]:
constant[
Takes a model type and a Kubernetes API resource field name (such as
"serviceAccount") and returns a related attribute name (such as
"service_account") to be used with kubernetes.client.models objects. It is
impossible to prove a negative but it seems like it is always a question of
making camelCase to snake_case but by using the provided 'attribute_map' we
also ensure that the fields actually exist.
Example of V1PodSpec's attribute_map:
{
'active_deadline_seconds': 'activeDeadlineSeconds',
'affinity': 'affinity',
'automount_service_account_token': 'automountServiceAccountToken',
'containers': 'containers',
'dns_policy': 'dnsPolicy',
'host_aliases': 'hostAliases',
'host_ipc': 'hostIPC',
'host_network': 'hostNetwork',
'host_pid': 'hostPID',
'hostname': 'hostname',
'image_pull_secrets': 'imagePullSecrets',
'init_containers': 'initContainers',
'node_name': 'nodeName',
'node_selector': 'nodeSelector',
'priority': 'priority',
'priority_class_name': 'priorityClassName',
'restart_policy': 'restartPolicy',
'scheduler_name': 'schedulerName',
'security_context': 'securityContext',
'service_account': 'serviceAccount',
'service_account_name': 'serviceAccountName',
'subdomain': 'subdomain',
'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
'tolerations': 'tolerations',
'volumes': 'volumes'
}
]
if compare[name[field_name] in name[model_type].attribute_map] begin[:]
return[name[field_name]]
for taget[tuple[[<ast.Name object at 0x7da1b16c0040>, <ast.Name object at 0x7da1b16c0c70>]]] in starred[call[name[model_type].attribute_map.items, parameter[]]] begin[:]
if compare[name[value] equal[==] name[field_name]] begin[:]
return[name[key]] | keyword[def] identifier[_get_k8s_model_attribute] ( identifier[model_type] , identifier[field_name] ):
literal[string]
keyword[if] identifier[field_name] keyword[in] identifier[model_type] . identifier[attribute_map] :
keyword[return] identifier[field_name]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[model_type] . identifier[attribute_map] . identifier[items] ():
keyword[if] identifier[value] == identifier[field_name] :
keyword[return] identifier[key]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[model_type] . identifier[__name__] , identifier[field_name] )) | def _get_k8s_model_attribute(model_type, field_name):
"""
Takes a model type and a Kubernetes API resource field name (such as
"serviceAccount") and returns a related attribute name (such as
"service_account") to be used with kubernetes.client.models objects. It is
impossible to prove a negative but it seems like it is always a question of
making camelCase to snake_case but by using the provided 'attribute_map' we
also ensure that the fields actually exist.
Example of V1PodSpec's attribute_map:
{
'active_deadline_seconds': 'activeDeadlineSeconds',
'affinity': 'affinity',
'automount_service_account_token': 'automountServiceAccountToken',
'containers': 'containers',
'dns_policy': 'dnsPolicy',
'host_aliases': 'hostAliases',
'host_ipc': 'hostIPC',
'host_network': 'hostNetwork',
'host_pid': 'hostPID',
'hostname': 'hostname',
'image_pull_secrets': 'imagePullSecrets',
'init_containers': 'initContainers',
'node_name': 'nodeName',
'node_selector': 'nodeSelector',
'priority': 'priority',
'priority_class_name': 'priorityClassName',
'restart_policy': 'restartPolicy',
'scheduler_name': 'schedulerName',
'security_context': 'securityContext',
'service_account': 'serviceAccount',
'service_account_name': 'serviceAccountName',
'subdomain': 'subdomain',
'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
'tolerations': 'tolerations',
'volumes': 'volumes'
}
"""
# if we get "service_account", return
if field_name in model_type.attribute_map:
return field_name # depends on [control=['if'], data=['field_name']]
# if we get "serviceAccount", then return "service_account"
for (key, value) in model_type.attribute_map.items():
if value == field_name:
return key # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
raise ValueError("'{}' did not have an attribute matching '{}'".format(model_type.__name__, field_name)) |
def update_timestampable_model(sender, instance, *args, **kwargs):
    '''
    Using signals guarantees that timestamps are set no matter what:
    loading fixtures, bulk inserts, bulk updates, etc.
    Indeed, the `save()` method is *not* called when using fixtures.
    '''
    if not isinstance(instance, TimestampableModel):
        return
    # Take a single timestamp so a freshly created instance gets identical
    # created_at/updated_at values (the old code called now() twice, leaving
    # a small gap between the two fields on creation).
    timestamp = now()
    if not instance.pk:
        instance.created_at = timestamp
    instance.updated_at = timestamp
constant[
Using signals guarantees that timestamps are set no matter what:
loading fixtures, bulk inserts, bulk updates, etc.
Indeed, the `save()` method is *not* called when using fixtures.
]
if <ast.UnaryOp object at 0x7da1b0abad70> begin[:]
return[None]
if <ast.UnaryOp object at 0x7da1b0aba9b0> begin[:]
name[instance].created_at assign[=] call[name[now], parameter[]]
name[instance].updated_at assign[=] call[name[now], parameter[]] | keyword[def] identifier[update_timestampable_model] ( identifier[sender] , identifier[instance] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[instance] , identifier[TimestampableModel] ):
keyword[return]
keyword[if] keyword[not] identifier[instance] . identifier[pk] :
identifier[instance] . identifier[created_at] = identifier[now] ()
identifier[instance] . identifier[updated_at] = identifier[now] () | def update_timestampable_model(sender, instance, *args, **kwargs):
"""
Using signals guarantees that timestamps are set no matter what:
loading fixtures, bulk inserts, bulk updates, etc.
Indeed, the `save()` method is *not* called when using fixtures.
"""
if not isinstance(instance, TimestampableModel):
return # depends on [control=['if'], data=[]]
if not instance.pk:
instance.created_at = now() # depends on [control=['if'], data=[]]
instance.updated_at = now() |
def _jseq(self, cols, converter=None):
    """Build a JVM Seq of Columns from a list of Column or names.

    The optional ``converter`` is forwarded unchanged to ``_to_seq``.
    """
    spark_context = self.sql_ctx._sc
    return _to_seq(spark_context, cols, converter)
constant[Return a JVM Seq of Columns from a list of Column or names]
return[call[name[_to_seq], parameter[name[self].sql_ctx._sc, name[cols], name[converter]]]] | keyword[def] identifier[_jseq] ( identifier[self] , identifier[cols] , identifier[converter] = keyword[None] ):
literal[string]
keyword[return] identifier[_to_seq] ( identifier[self] . identifier[sql_ctx] . identifier[_sc] , identifier[cols] , identifier[converter] ) | def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter) |
def remove_all_listeners(self, event=None):
    """Drop every listener registered for ``event``.

    With ``event=None`` (the default), listeners for *all* events are
    discarded at once.
    """
    if event is None:
        # Reset the whole registry; unseen events lazily get a fresh
        # OrderedDict thanks to the defaultdict factory.
        self._events = defaultdict(OrderedDict)
    else:
        self._events[event] = OrderedDict()
constant[Remove all listeners attached to ``event``.
If ``event`` is ``None``, remove all listeners on all events.
]
if compare[name[event] is_not constant[None]] begin[:]
call[name[self]._events][name[event]] assign[=] call[name[OrderedDict], parameter[]] | keyword[def] identifier[remove_all_listeners] ( identifier[self] , identifier[event] = keyword[None] ):
literal[string]
keyword[if] identifier[event] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_events] [ identifier[event] ]= identifier[OrderedDict] ()
keyword[else] :
identifier[self] . identifier[_events] = identifier[defaultdict] ( identifier[OrderedDict] ) | def remove_all_listeners(self, event=None):
"""Remove all listeners attached to ``event``.
If ``event`` is ``None``, remove all listeners on all events.
"""
if event is not None:
self._events[event] = OrderedDict() # depends on [control=['if'], data=['event']]
else:
self._events = defaultdict(OrderedDict) |
def _translate_pattern(self, pattern, anchor=True, prefix=None,
                       is_regex=False):
    """Translate a shell-like wildcard pattern to a compiled regular
    expression.
    Return the compiled regex. If 'is_regex' true,
    then 'pattern' is directly compiled to a regex (if it's a string)
    or just returned as-is (assumes it's a regex object).
    """
    if is_regex:
        if isinstance(pattern, str):
            return re.compile(pattern)
        else:
            return pattern
    # On Python > 3.2, _glob_to_re (presumably fnmatch.translate-based —
    # TODO confirm) wraps the translated pattern in fixed start/end
    # markers. Translating the single char '_' and partitioning on it
    # recovers those markers so they can be stripped and re-applied below.
    if _PYTHON_VERSION > (3, 2):
        # ditch start and end characters
        start, _, end = self._glob_to_re('_').partition('_')
    if pattern:
        pattern_re = self._glob_to_re(pattern)
        if _PYTHON_VERSION > (3, 2):
            # Sanity check on the marker assumption (stripped under -O).
            assert pattern_re.startswith(start) and pattern_re.endswith(end)
    else:
        pattern_re = ''
    # Regex-escaped base directory, with a trailing path separator
    # guaranteed by joining with ''.
    base = re.escape(os.path.join(self.base, ''))
    if prefix is not None:
        # ditch end of pattern character
        if _PYTHON_VERSION <= (3, 2):
            empty_pattern = self._glob_to_re('')
            prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
        else:
            prefix_re = self._glob_to_re(prefix)
            assert prefix_re.startswith(start) and prefix_re.endswith(end)
            # Strip the wrapper markers; they are re-added once around the
            # fully assembled pattern below.
            prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
        sep = os.sep
        if os.sep == '\\':
            # On Windows the separator itself must be regex-escaped.
            sep = r'\\'
        if _PYTHON_VERSION <= (3, 2):
            pattern_re = '^' + base + sep.join((prefix_re,
                                                '.*' + pattern_re))
        else:
            pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
            # Reassemble: <start><base><prefix><sep>.*<pattern><end>
            pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
                                              pattern_re, end)
    else:  # no prefix -- respect anchor flag
        if anchor:
            if _PYTHON_VERSION <= (3, 2):
                pattern_re = '^' + base + pattern_re
            else:
                # Splice base right after the start marker so the pattern
                # stays anchored at the base directory.
                pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
    return re.compile(pattern_re)
constant[Translate a shell-like wildcard pattern to a compiled regular
expression.
Return the compiled regex. If 'is_regex' true,
then 'pattern' is directly compiled to a regex (if it's a string)
or just returned as-is (assumes it's a regex object).
]
if name[is_regex] begin[:]
if call[name[isinstance], parameter[name[pattern], name[str]]] begin[:]
return[call[name[re].compile, parameter[name[pattern]]]]
if compare[name[_PYTHON_VERSION] greater[>] tuple[[<ast.Constant object at 0x7da1b1ea01f0>, <ast.Constant object at 0x7da1b1ea11b0>]]] begin[:]
<ast.Tuple object at 0x7da1b1ea0490> assign[=] call[call[name[self]._glob_to_re, parameter[constant[_]]].partition, parameter[constant[_]]]
if name[pattern] begin[:]
variable[pattern_re] assign[=] call[name[self]._glob_to_re, parameter[name[pattern]]]
if compare[name[_PYTHON_VERSION] greater[>] tuple[[<ast.Constant object at 0x7da1b1e8e350>, <ast.Constant object at 0x7da1b1e8fc10>]]] begin[:]
assert[<ast.BoolOp object at 0x7da1b1e8cfd0>]
variable[base] assign[=] call[name[re].escape, parameter[call[name[os].path.join, parameter[name[self].base, constant[]]]]]
if compare[name[prefix] is_not constant[None]] begin[:]
if compare[name[_PYTHON_VERSION] less_or_equal[<=] tuple[[<ast.Constant object at 0x7da1b1e8f040>, <ast.Constant object at 0x7da1b1e8d330>]]] begin[:]
variable[empty_pattern] assign[=] call[name[self]._glob_to_re, parameter[constant[]]]
variable[prefix_re] assign[=] call[call[name[self]._glob_to_re, parameter[name[prefix]]]][<ast.Slice object at 0x7da1b1e8dea0>]
variable[sep] assign[=] name[os].sep
if compare[name[os].sep equal[==] constant[\]] begin[:]
variable[sep] assign[=] constant[\\]
if compare[name[_PYTHON_VERSION] less_or_equal[<=] tuple[[<ast.Constant object at 0x7da1b1e8e0b0>, <ast.Constant object at 0x7da1b1e8d4e0>]]] begin[:]
variable[pattern_re] assign[=] binary_operation[binary_operation[constant[^] + name[base]] + call[name[sep].join, parameter[tuple[[<ast.Name object at 0x7da1b1e8f490>, <ast.BinOp object at 0x7da1b1e8f100>]]]]]
return[call[name[re].compile, parameter[name[pattern_re]]]] | keyword[def] identifier[_translate_pattern] ( identifier[self] , identifier[pattern] , identifier[anchor] = keyword[True] , identifier[prefix] = keyword[None] ,
identifier[is_regex] = keyword[False] ):
literal[string]
keyword[if] identifier[is_regex] :
keyword[if] identifier[isinstance] ( identifier[pattern] , identifier[str] ):
keyword[return] identifier[re] . identifier[compile] ( identifier[pattern] )
keyword[else] :
keyword[return] identifier[pattern]
keyword[if] identifier[_PYTHON_VERSION] >( literal[int] , literal[int] ):
identifier[start] , identifier[_] , identifier[end] = identifier[self] . identifier[_glob_to_re] ( literal[string] ). identifier[partition] ( literal[string] )
keyword[if] identifier[pattern] :
identifier[pattern_re] = identifier[self] . identifier[_glob_to_re] ( identifier[pattern] )
keyword[if] identifier[_PYTHON_VERSION] >( literal[int] , literal[int] ):
keyword[assert] identifier[pattern_re] . identifier[startswith] ( identifier[start] ) keyword[and] identifier[pattern_re] . identifier[endswith] ( identifier[end] )
keyword[else] :
identifier[pattern_re] = literal[string]
identifier[base] = identifier[re] . identifier[escape] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[base] , literal[string] ))
keyword[if] identifier[prefix] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[_PYTHON_VERSION] <=( literal[int] , literal[int] ):
identifier[empty_pattern] = identifier[self] . identifier[_glob_to_re] ( literal[string] )
identifier[prefix_re] = identifier[self] . identifier[_glob_to_re] ( identifier[prefix] )[:- identifier[len] ( identifier[empty_pattern] )]
keyword[else] :
identifier[prefix_re] = identifier[self] . identifier[_glob_to_re] ( identifier[prefix] )
keyword[assert] identifier[prefix_re] . identifier[startswith] ( identifier[start] ) keyword[and] identifier[prefix_re] . identifier[endswith] ( identifier[end] )
identifier[prefix_re] = identifier[prefix_re] [ identifier[len] ( identifier[start] ): identifier[len] ( identifier[prefix_re] )- identifier[len] ( identifier[end] )]
identifier[sep] = identifier[os] . identifier[sep]
keyword[if] identifier[os] . identifier[sep] == literal[string] :
identifier[sep] = literal[string]
keyword[if] identifier[_PYTHON_VERSION] <=( literal[int] , literal[int] ):
identifier[pattern_re] = literal[string] + identifier[base] + identifier[sep] . identifier[join] (( identifier[prefix_re] ,
literal[string] + identifier[pattern_re] ))
keyword[else] :
identifier[pattern_re] = identifier[pattern_re] [ identifier[len] ( identifier[start] ): identifier[len] ( identifier[pattern_re] )- identifier[len] ( identifier[end] )]
identifier[pattern_re] = literal[string] %( identifier[start] , identifier[base] , identifier[prefix_re] , identifier[sep] ,
identifier[pattern_re] , identifier[end] )
keyword[else] :
keyword[if] identifier[anchor] :
keyword[if] identifier[_PYTHON_VERSION] <=( literal[int] , literal[int] ):
identifier[pattern_re] = literal[string] + identifier[base] + identifier[pattern_re]
keyword[else] :
identifier[pattern_re] = literal[string] %( identifier[start] , identifier[base] , identifier[pattern_re] [ identifier[len] ( identifier[start] ):])
keyword[return] identifier[re] . identifier[compile] ( identifier[pattern_re] ) | def _translate_pattern(self, pattern, anchor=True, prefix=None, is_regex=False):
"""Translate a shell-like wildcard pattern to a compiled regular
expression.
Return the compiled regex. If 'is_regex' true,
then 'pattern' is directly compiled to a regex (if it's a string)
or just returned as-is (assumes it's a regex object).
"""
if is_regex:
if isinstance(pattern, str):
return re.compile(pattern) # depends on [control=['if'], data=[]]
else:
return pattern # depends on [control=['if'], data=[]]
if _PYTHON_VERSION > (3, 2):
# ditch start and end characters
(start, _, end) = self._glob_to_re('_').partition('_') # depends on [control=['if'], data=[]]
if pattern:
pattern_re = self._glob_to_re(pattern)
if _PYTHON_VERSION > (3, 2):
assert pattern_re.startswith(start) and pattern_re.endswith(end) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
pattern_re = ''
base = re.escape(os.path.join(self.base, ''))
if prefix is not None:
# ditch end of pattern character
if _PYTHON_VERSION <= (3, 2):
empty_pattern = self._glob_to_re('')
prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] # depends on [control=['if'], data=[]]
else:
prefix_re = self._glob_to_re(prefix)
assert prefix_re.startswith(start) and prefix_re.endswith(end)
prefix_re = prefix_re[len(start):len(prefix_re) - len(end)]
sep = os.sep
if os.sep == '\\':
sep = '\\\\' # depends on [control=['if'], data=[]]
if _PYTHON_VERSION <= (3, 2):
pattern_re = '^' + base + sep.join((prefix_re, '.*' + pattern_re)) # depends on [control=['if'], data=[]]
else:
pattern_re = pattern_re[len(start):len(pattern_re) - len(end)]
pattern_re = '%s%s%s%s.*%s%s' % (start, base, prefix_re, sep, pattern_re, end) # depends on [control=['if'], data=['prefix']] # no prefix -- respect anchor flag
elif anchor:
if _PYTHON_VERSION <= (3, 2):
pattern_re = '^' + base + pattern_re # depends on [control=['if'], data=[]]
else:
pattern_re = '%s%s%s' % (start, base, pattern_re[len(start):]) # depends on [control=['if'], data=[]]
return re.compile(pattern_re) |
def get(key, default=-1):
    """Backport support for original codes.

    Integer keys are resolved by enum value; other keys are resolved by
    member name, with unknown names registered on the fly using
    ``default``.
    """
    if isinstance(key, int):
        # Value lookup, e.g. get(0) -> TOS_THR(0).
        return TOS_THR(key)
    # Name lookup: register the member first if it does not exist yet.
    known_members = TOS_THR._member_map_
    if key not in known_members:
        extend_enum(TOS_THR, key, default)
    return TOS_THR[key]
constant[Backport support for original codes.]
if call[name[isinstance], parameter[name[key], name[int]]] begin[:]
return[call[name[TOS_THR], parameter[name[key]]]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[TOS_THR]._member_map_] begin[:]
call[name[extend_enum], parameter[name[TOS_THR], name[key], name[default]]]
return[call[name[TOS_THR]][name[key]]] | keyword[def] identifier[get] ( identifier[key] , identifier[default] =- literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[key] , identifier[int] ):
keyword[return] identifier[TOS_THR] ( identifier[key] )
keyword[if] identifier[key] keyword[not] keyword[in] identifier[TOS_THR] . identifier[_member_map_] :
identifier[extend_enum] ( identifier[TOS_THR] , identifier[key] , identifier[default] )
keyword[return] identifier[TOS_THR] [ identifier[key] ] | def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return TOS_THR(key) # depends on [control=['if'], data=[]]
if key not in TOS_THR._member_map_:
extend_enum(TOS_THR, key, default) # depends on [control=['if'], data=['key']]
return TOS_THR[key] |
def disconnect_handler(remote, *args, **kwargs):
    """Handle unlinking of remote account.

    Requires an authenticated user; otherwise the request is deferred to
    the login manager. When a linked account exists, its external-id link
    and the account record are removed, and the current identity is
    disconnected before redirecting back to the settings page.

    :param remote: the remote OAuth application instance.
    """
    if not current_user.is_authenticated:
        return current_app.login_manager.unauthorized()
    account = RemoteAccount.get(user_id=current_user.get_id(),
                                client_id=remote.consumer_key)
    # Bug fix: the original read account.extra_data before checking
    # `if account:`, raising AttributeError whenever RemoteAccount.get()
    # returned None (no account linked). Access it inside the guard.
    if account:
        external_id = account.extra_data.get('external_id')
        if external_id:
            oauth_unlink_external_id(dict(id=external_id, method='cern'))
        with db.session.begin_nested():
            account.delete()
    disconnect_identity(g.identity)
    return redirect(url_for('invenio_oauthclient_settings.index'))
constant[Handle unlinking of remote account.]
if <ast.UnaryOp object at 0x7da1b255c9d0> begin[:]
return[call[name[current_app].login_manager.unauthorized, parameter[]]]
variable[account] assign[=] call[name[RemoteAccount].get, parameter[]]
variable[external_id] assign[=] call[name[account].extra_data.get, parameter[constant[external_id]]]
if name[external_id] begin[:]
call[name[oauth_unlink_external_id], parameter[call[name[dict], parameter[]]]]
if name[account] begin[:]
with call[name[db].session.begin_nested, parameter[]] begin[:]
call[name[account].delete, parameter[]]
call[name[disconnect_identity], parameter[name[g].identity]]
return[call[name[redirect], parameter[call[name[url_for], parameter[constant[invenio_oauthclient_settings.index]]]]]] | keyword[def] identifier[disconnect_handler] ( identifier[remote] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[current_user] . identifier[is_authenticated] :
keyword[return] identifier[current_app] . identifier[login_manager] . identifier[unauthorized] ()
identifier[account] = identifier[RemoteAccount] . identifier[get] ( identifier[user_id] = identifier[current_user] . identifier[get_id] (),
identifier[client_id] = identifier[remote] . identifier[consumer_key] )
identifier[external_id] = identifier[account] . identifier[extra_data] . identifier[get] ( literal[string] )
keyword[if] identifier[external_id] :
identifier[oauth_unlink_external_id] ( identifier[dict] ( identifier[id] = identifier[external_id] , identifier[method] = literal[string] ))
keyword[if] identifier[account] :
keyword[with] identifier[db] . identifier[session] . identifier[begin_nested] ():
identifier[account] . identifier[delete] ()
identifier[disconnect_identity] ( identifier[g] . identifier[identity] )
keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] )) | def disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account."""
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized() # depends on [control=['if'], data=[]]
account = RemoteAccount.get(user_id=current_user.get_id(), client_id=remote.consumer_key)
external_id = account.extra_data.get('external_id')
if external_id:
oauth_unlink_external_id(dict(id=external_id, method='cern')) # depends on [control=['if'], data=[]]
if account:
with db.session.begin_nested():
account.delete() # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
disconnect_identity(g.identity)
return redirect(url_for('invenio_oauthclient_settings.index')) |
def lattice_spheres(shape: List[int], radius: int, offset: int = 0,
                    lattice: str = 'sc'):
    r"""
    Generates a cubic packing of spheres in a specified lattice arrangement

    Parameters
    ----------
    shape : list
        The size of the image to generate in [Nx, Ny, Nz] where N is the
        number of voxels in each direction. For a 2D image, use [Nx, Ny].
        A scalar is interpreted as a cubic image of that size.
    radius : scalar
        The radius of spheres (circles) in the packing
    offset : scalar
        The amount offset (+ or -) to add between sphere centers.
    lattice : string
        Specifies the type of lattice to create. Options are:
        'sc' - Simple Cubic (default)
        'fcc' - Face Centered Cubic
        'bcc' - Body Centered Cubic (also accepted: 'body centered cubic';
        the historical misspelling 'body cenetered cubic' still works)
        For 2D images, 'sc' gives a square lattice and both 'fcc' and 'bcc'
        give a triangular lattice.

    Returns
    -------
    image : ND-array
        A boolean array with ``True`` values denoting the pore space
    """
    print(78*'―')
    print('lattice_spheres: Generating ' + lattice + ' lattice')
    r = radius
    shape = sp.array(shape)
    if sp.size(shape) == 1:
        # Scalar shape -> cubic image.
        shape = sp.full((3, ), int(shape))
    im = sp.zeros(shape, dtype=bool)
    im = im.squeeze()
    # Parse lattice type; 3D names collapse to their 2D analogues.
    lattice = lattice.lower()
    if im.ndim == 2:
        if lattice in ['sc']:
            lattice = 'sq'
        if lattice in ['bcc', 'fcc']:
            lattice = 'tri'
    # Each branch marks sphere centers with True at the lattice sites;
    # the distance transform at the end grows them into spheres.
    if lattice in ['sq', 'square']:
        spacing = 2*r
        s = int(spacing/2) + sp.array(offset)
        coords = sp.mgrid[r:im.shape[0]-r:2*s,
                          r:im.shape[1]-r:2*s]
        im[coords[0], coords[1]] = 1
    elif lattice in ['tri', 'triangular']:
        spacing = 2*sp.floor(sp.sqrt(2*(r**2))).astype(int)
        s = int(spacing/2) + offset
        coords = sp.mgrid[r:im.shape[0]-r:2*s,
                          r:im.shape[1]-r:2*s]
        im[coords[0], coords[1]] = 1
        # Second, offset sub-lattice to complete the triangular pattern.
        coords = sp.mgrid[s+r:im.shape[0]-r:2*s,
                          s+r:im.shape[1]-r:2*s]
        im[coords[0], coords[1]] = 1
    elif lattice in ['sc', 'simple cubic', 'cubic']:
        spacing = 2*r
        s = int(spacing/2) + sp.array(offset)
        coords = sp.mgrid[r:im.shape[0]-r:2*s,
                          r:im.shape[1]-r:2*s,
                          r:im.shape[2]-r:2*s]
        im[coords[0], coords[1], coords[2]] = 1
    # Bug fix: 'body centered cubic' was previously unreachable due to a
    # misspelling; the typo'd option is kept for backward compatibility.
    elif lattice in ['bcc', 'body cenetered cubic', 'body centered cubic']:
        spacing = 2*sp.floor(sp.sqrt(4/3*(r**2))).astype(int)
        s = int(spacing/2) + offset
        coords = sp.mgrid[r:im.shape[0]-r:2*s,
                          r:im.shape[1]-r:2*s,
                          r:im.shape[2]-r:2*s]
        im[coords[0], coords[1], coords[2]] = 1
        # Body-centered sites, offset by s along every axis.
        coords = sp.mgrid[s+r:im.shape[0]-r:2*s,
                          s+r:im.shape[1]-r:2*s,
                          s+r:im.shape[2]-r:2*s]
        im[coords[0], coords[1], coords[2]] = 1
    elif lattice in ['fcc', 'face centered cubic']:
        spacing = 2*sp.floor(sp.sqrt(2*(r**2))).astype(int)
        s = int(spacing/2) + offset
        coords = sp.mgrid[r:im.shape[0]-r:2*s,
                          r:im.shape[1]-r:2*s,
                          r:im.shape[2]-r:2*s]
        im[coords[0], coords[1], coords[2]] = 1
        # Face-centered sites on the three face families.
        coords = sp.mgrid[r:im.shape[0]-r:2*s,
                          s+r:im.shape[1]-r:2*s,
                          s+r:im.shape[2]-r:2*s]
        im[coords[0], coords[1], coords[2]] = 1
        # NOTE(review): the next two faces start at s rather than s+r,
        # unlike the siblings; kept as in the historical implementation —
        # confirm this asymmetry is intended.
        coords = sp.mgrid[s+r:im.shape[0]-r:2*s,
                          s:im.shape[1]-r:2*s,
                          s+r:im.shape[2]-r:2*s]
        im[coords[0], coords[1], coords[2]] = 1
        coords = sp.mgrid[s+r:im.shape[0]-r:2*s,
                          s+r:im.shape[1]-r:2*s,
                          s:im.shape[2]-r:2*s]
        im[coords[0], coords[1], coords[2]] = 1
    # Grow each marked center into a sphere of radius r: voxels strictly
    # closer than r to a center become solid (False); the rest is pore.
    im = ~(spim.distance_transform_edt(~im) < r)
    return im
constant[
Generates a cubic packing of spheres in a specified lattice arrangement
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels in each direction. For a 2D image, use [Nx, Ny].
radius : scalar
The radius of spheres (circles) in the packing
offset : scalar
The amount offset (+ or -) to add between sphere centers.
lattice : string
Specifies the type of lattice to create. Options are:
'sc' - Simple Cubic (default)
'fcc' - Face Centered Cubic
'bcc' - Body Centered Cubic
For 2D images, 'sc' gives a square lattice and both 'fcc' and 'bcc'
give a triangular lattice.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
]
call[name[print], parameter[binary_operation[constant[78] * constant[―]]]]
call[name[print], parameter[binary_operation[binary_operation[constant[lattice_spheres: Generating ] + name[lattice]] + constant[ lattice]]]]
variable[r] assign[=] name[radius]
variable[shape] assign[=] call[name[sp].array, parameter[name[shape]]]
if compare[call[name[sp].size, parameter[name[shape]]] equal[==] constant[1]] begin[:]
variable[shape] assign[=] call[name[sp].full, parameter[tuple[[<ast.Constant object at 0x7da1b05244c0>]], call[name[int], parameter[name[shape]]]]]
variable[im] assign[=] call[name[sp].zeros, parameter[name[shape]]]
variable[im] assign[=] call[name[im].squeeze, parameter[]]
variable[lattice] assign[=] call[name[lattice].lower, parameter[]]
if compare[name[im].ndim equal[==] constant[2]] begin[:]
if compare[name[lattice] in list[[<ast.Constant object at 0x7da1b0780910>]]] begin[:]
variable[lattice] assign[=] constant[sq]
if compare[name[lattice] in list[[<ast.Constant object at 0x7da1b0780a30>, <ast.Constant object at 0x7da1b0781300>]]] begin[:]
variable[lattice] assign[=] constant[tri]
if compare[name[lattice] in list[[<ast.Constant object at 0x7da1b0782230>, <ast.Constant object at 0x7da1b0780460>]]] begin[:]
variable[spacing] assign[=] binary_operation[constant[2] * name[r]]
variable[s] assign[=] binary_operation[call[name[int], parameter[binary_operation[name[spacing] / constant[2]]]] + call[name[sp].array, parameter[name[offset]]]]
variable[coords] assign[=] call[name[sp].mgrid][tuple[[<ast.Slice object at 0x7da1b0524bb0>, <ast.Slice object at 0x7da1b0524f10>]]]
call[name[im]][tuple[[<ast.Subscript object at 0x7da1b0525210>, <ast.Subscript object at 0x7da1b0525270>]]] assign[=] constant[1]
variable[im] assign[=] <ast.UnaryOp object at 0x7da1b05575e0>
return[name[im]] | keyword[def] identifier[lattice_spheres] ( identifier[shape] : identifier[List] [ identifier[int] ], identifier[radius] : identifier[int] , identifier[offset] : identifier[int] = literal[int] ,
identifier[lattice] : identifier[str] = literal[string] ):
literal[string]
identifier[print] ( literal[int] * literal[string] )
identifier[print] ( literal[string] + identifier[lattice] + literal[string] )
identifier[r] = identifier[radius]
identifier[shape] = identifier[sp] . identifier[array] ( identifier[shape] )
keyword[if] identifier[sp] . identifier[size] ( identifier[shape] )== literal[int] :
identifier[shape] = identifier[sp] . identifier[full] (( literal[int] ,), identifier[int] ( identifier[shape] ))
identifier[im] = identifier[sp] . identifier[zeros] ( identifier[shape] , identifier[dtype] = identifier[bool] )
identifier[im] = identifier[im] . identifier[squeeze] ()
identifier[lattice] = identifier[lattice] . identifier[lower] ()
keyword[if] identifier[im] . identifier[ndim] == literal[int] :
keyword[if] identifier[lattice] keyword[in] [ literal[string] ]:
identifier[lattice] = literal[string]
keyword[if] identifier[lattice] keyword[in] [ literal[string] , literal[string] ]:
identifier[lattice] = literal[string]
keyword[if] identifier[lattice] keyword[in] [ literal[string] , literal[string] ]:
identifier[spacing] = literal[int] * identifier[r]
identifier[s] = identifier[int] ( identifier[spacing] / literal[int] )+ identifier[sp] . identifier[array] ( identifier[offset] )
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
keyword[elif] identifier[lattice] keyword[in] [ literal[string] , literal[string] ]:
identifier[spacing] = literal[int] * identifier[sp] . identifier[floor] ( identifier[sp] . identifier[sqrt] ( literal[int] *( identifier[r] ** literal[int] ))). identifier[astype] ( identifier[int] )
identifier[s] = identifier[int] ( identifier[spacing] / literal[int] )+ identifier[offset]
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
keyword[elif] identifier[lattice] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[spacing] = literal[int] * identifier[r]
identifier[s] = identifier[int] ( identifier[spacing] / literal[int] )+ identifier[sp] . identifier[array] ( identifier[offset] )
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
keyword[elif] identifier[lattice] keyword[in] [ literal[string] , literal[string] ]:
identifier[spacing] = literal[int] * identifier[sp] . identifier[floor] ( identifier[sp] . identifier[sqrt] ( literal[int] / literal[int] *( identifier[r] ** literal[int] ))). identifier[astype] ( identifier[int] )
identifier[s] = identifier[int] ( identifier[spacing] / literal[int] )+ identifier[offset]
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
keyword[elif] identifier[lattice] keyword[in] [ literal[string] , literal[string] ]:
identifier[spacing] = literal[int] * identifier[sp] . identifier[floor] ( identifier[sp] . identifier[sqrt] ( literal[int] *( identifier[r] ** literal[int] ))). identifier[astype] ( identifier[int] )
identifier[s] = identifier[int] ( identifier[spacing] / literal[int] )+ identifier[offset]
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[s] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
identifier[coords] = identifier[sp] . identifier[mgrid] [ identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[s] + identifier[r] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ,
identifier[s] : identifier[im] . identifier[shape] [ literal[int] ]- identifier[r] : literal[int] * identifier[s] ]
identifier[im] [ identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ], identifier[coords] [ literal[int] ]]= literal[int]
identifier[im] =~( identifier[spim] . identifier[distance_transform_edt] (~ identifier[im] )< identifier[r] )
keyword[return] identifier[im] | def lattice_spheres(shape: List[int], radius: int, offset: int=0, lattice: str='sc'):
"""
Generates a cubic packing of spheres in a specified lattice arrangement
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels in each direction. For a 2D image, use [Nx, Ny].
radius : scalar
The radius of spheres (circles) in the packing
offset : scalar
The amount offset (+ or -) to add between sphere centers.
lattice : string
Specifies the type of lattice to create. Options are:
'sc' - Simple Cubic (default)
'fcc' - Face Centered Cubic
'bcc' - Body Centered Cubic
For 2D images, 'sc' gives a square lattice and both 'fcc' and 'bcc'
give a triangular lattice.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
"""
print(78 * '―')
print('lattice_spheres: Generating ' + lattice + ' lattice')
r = radius
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3,), int(shape)) # depends on [control=['if'], data=[]]
im = sp.zeros(shape, dtype=bool)
im = im.squeeze()
# Parse lattice type
lattice = lattice.lower()
if im.ndim == 2:
if lattice in ['sc']:
lattice = 'sq' # depends on [control=['if'], data=['lattice']]
if lattice in ['bcc', 'fcc']:
lattice = 'tri' # depends on [control=['if'], data=['lattice']] # depends on [control=['if'], data=[]]
if lattice in ['sq', 'square']:
spacing = 2 * r
s = int(spacing / 2) + sp.array(offset)
coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s]
im[coords[0], coords[1]] = 1 # depends on [control=['if'], data=[]]
elif lattice in ['tri', 'triangular']:
spacing = 2 * sp.floor(sp.sqrt(2 * r ** 2)).astype(int)
s = int(spacing / 2) + offset
coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s]
im[coords[0], coords[1]] = 1
coords = sp.mgrid[s + r:im.shape[0] - r:2 * s, s + r:im.shape[1] - r:2 * s]
im[coords[0], coords[1]] = 1 # depends on [control=['if'], data=[]]
elif lattice in ['sc', 'simple cubic', 'cubic']:
spacing = 2 * r
s = int(spacing / 2) + sp.array(offset)
coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s, r:im.shape[2] - r:2 * s]
im[coords[0], coords[1], coords[2]] = 1 # depends on [control=['if'], data=[]]
elif lattice in ['bcc', 'body cenetered cubic']:
spacing = 2 * sp.floor(sp.sqrt(4 / 3 * r ** 2)).astype(int)
s = int(spacing / 2) + offset
coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s, r:im.shape[2] - r:2 * s]
im[coords[0], coords[1], coords[2]] = 1
coords = sp.mgrid[s + r:im.shape[0] - r:2 * s, s + r:im.shape[1] - r:2 * s, s + r:im.shape[2] - r:2 * s]
im[coords[0], coords[1], coords[2]] = 1 # depends on [control=['if'], data=[]]
elif lattice in ['fcc', 'face centered cubic']:
spacing = 2 * sp.floor(sp.sqrt(2 * r ** 2)).astype(int)
s = int(spacing / 2) + offset
coords = sp.mgrid[r:im.shape[0] - r:2 * s, r:im.shape[1] - r:2 * s, r:im.shape[2] - r:2 * s]
im[coords[0], coords[1], coords[2]] = 1
coords = sp.mgrid[r:im.shape[0] - r:2 * s, s + r:im.shape[1] - r:2 * s, s + r:im.shape[2] - r:2 * s]
im[coords[0], coords[1], coords[2]] = 1
coords = sp.mgrid[s + r:im.shape[0] - r:2 * s, s:im.shape[1] - r:2 * s, s + r:im.shape[2] - r:2 * s]
im[coords[0], coords[1], coords[2]] = 1
coords = sp.mgrid[s + r:im.shape[0] - r:2 * s, s + r:im.shape[1] - r:2 * s, s:im.shape[2] - r:2 * s]
im[coords[0], coords[1], coords[2]] = 1 # depends on [control=['if'], data=[]]
im = ~(spim.distance_transform_edt(~im) < r)
return im |
def parse_tags(tagstring):
    """
    Parse raw tag input into a sorted list of unique tag names.

    Double-quoted sections are taken verbatim and may therefore contain
    the delimiter; all unquoted text is collected first and only split on
    the configured delimiter once every quoted word has been extracted.

    Adapted from Taggit, modified to not split strings on spaces.
    Ported from Jonathan Buchanan's `django-tagging
    <http://django-tagging.googlecode.com/>`_
    """
    if not tagstring:
        return []
    tagstring = force_text(tagstring)
    tags = []
    # Characters accumulated since the last quote boundary.
    pending = []
    # Unquoted sections, split on the delimiter after scanning finishes.
    unquoted_chunks = []
    stream = iter(tagstring)
    try:
        while True:
            ch = six.next(stream)
            if ch != '"':
                pending.append(ch)
                continue
            # Opening quote: everything gathered so far is unquoted text.
            if pending:
                unquoted_chunks.append(''.join(pending))
                pending = []
            ch = six.next(stream)
            while ch != '"':
                pending.append(ch)
                ch = six.next(stream)
            quoted_word = ''.join(pending).strip()
            if quoted_word:
                tags.append(quoted_word)
            pending = []
    except StopIteration:
        # An unterminated quote (or plain trailing text) is treated as
        # unquoted input.
        if pending:
            unquoted_chunks.append(''.join(pending))
    for chunk in unquoted_chunks:
        tags.extend(split_strip(chunk, settings.TAGGIT_SELECTIZE['DELIMITER']))
    tags = list(set(tags))
    tags.sort()
    return tags
constant[
Parses tag input, with multiple word input being activated and
delineated by commas and double quotes. Quotes take precedence, so
they may contain commas.
Returns a sorted list of unique tag names.
Adapted from Taggit, modified to not split strings on spaces.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
]
if <ast.UnaryOp object at 0x7da18ede6020> begin[:]
return[list[[]]]
variable[tagstring] assign[=] call[name[force_text], parameter[name[tagstring]]]
variable[words] assign[=] list[[]]
variable[buffer] assign[=] list[[]]
variable[to_be_split] assign[=] list[[]]
variable[i] assign[=] call[name[iter], parameter[name[tagstring]]]
<ast.Try object at 0x7da18ede66e0>
if name[to_be_split] begin[:]
for taget[name[chunk]] in starred[name[to_be_split]] begin[:]
call[name[words].extend, parameter[call[name[split_strip], parameter[name[chunk], call[name[settings].TAGGIT_SELECTIZE][constant[DELIMITER]]]]]]
variable[words] assign[=] call[name[list], parameter[call[name[set], parameter[name[words]]]]]
call[name[words].sort, parameter[]]
return[name[words]] | keyword[def] identifier[parse_tags] ( identifier[tagstring] ):
literal[string]
keyword[if] keyword[not] identifier[tagstring] :
keyword[return] []
identifier[tagstring] = identifier[force_text] ( identifier[tagstring] )
identifier[words] =[]
identifier[buffer] =[]
identifier[to_be_split] =[]
identifier[i] = identifier[iter] ( identifier[tagstring] )
keyword[try] :
keyword[while] keyword[True] :
identifier[c] = identifier[six] . identifier[next] ( identifier[i] )
keyword[if] identifier[c] == literal[string] :
keyword[if] identifier[buffer] :
identifier[to_be_split] . identifier[append] ( literal[string] . identifier[join] ( identifier[buffer] ))
identifier[buffer] =[]
identifier[c] = identifier[six] . identifier[next] ( identifier[i] )
keyword[while] identifier[c] != literal[string] :
identifier[buffer] . identifier[append] ( identifier[c] )
identifier[c] = identifier[six] . identifier[next] ( identifier[i] )
keyword[if] identifier[buffer] :
identifier[word] = literal[string] . identifier[join] ( identifier[buffer] ). identifier[strip] ()
keyword[if] identifier[word] :
identifier[words] . identifier[append] ( identifier[word] )
identifier[buffer] =[]
keyword[else] :
identifier[buffer] . identifier[append] ( identifier[c] )
keyword[except] identifier[StopIteration] :
keyword[if] identifier[buffer] :
identifier[to_be_split] . identifier[append] ( literal[string] . identifier[join] ( identifier[buffer] ))
keyword[if] identifier[to_be_split] :
keyword[for] identifier[chunk] keyword[in] identifier[to_be_split] :
identifier[words] . identifier[extend] ( identifier[split_strip] ( identifier[chunk] , identifier[settings] . identifier[TAGGIT_SELECTIZE] [ literal[string] ]))
identifier[words] = identifier[list] ( identifier[set] ( identifier[words] ))
identifier[words] . identifier[sort] ()
keyword[return] identifier[words] | def parse_tags(tagstring):
"""
Parses tag input, with multiple word input being activated and
delineated by commas and double quotes. Quotes take precedence, so
they may contain commas.
Returns a sorted list of unique tag names.
Adapted from Taggit, modified to not split strings on spaces.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
if not tagstring:
return [] # depends on [control=['if'], data=[]]
tagstring = force_text(tagstring)
words = []
buffer = []
# Defer splitting of non-quoted sections until we know if there are
# any unquoted commas.
to_be_split = []
i = iter(tagstring)
try:
while True:
c = six.next(i)
if c == '"':
if buffer:
to_be_split.append(''.join(buffer))
buffer = [] # depends on [control=['if'], data=[]]
c = six.next(i)
while c != '"':
buffer.append(c)
c = six.next(i) # depends on [control=['while'], data=['c']]
if buffer:
word = ''.join(buffer).strip()
if word:
words.append(word) # depends on [control=['if'], data=[]]
buffer = [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['c']]
else:
buffer.append(c) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except StopIteration:
# If we were parsing an open quote which was never closed treat
# the buffer as unquoted.
if buffer:
to_be_split.append(''.join(buffer)) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
if to_be_split:
for chunk in to_be_split:
words.extend(split_strip(chunk, settings.TAGGIT_SELECTIZE['DELIMITER'])) # depends on [control=['for'], data=['chunk']] # depends on [control=['if'], data=[]]
words = list(set(words))
words.sort()
return words |
def _walk_issuers(self, path, paths, failed_paths):
    """
    Recursively extends a candidate validation path towards a trusted
    root, recording every complete path that ends at a CA certificate.

    :param path:
        A ValidationPath object representing the current traversal of
        possible paths

    :param paths:
        A list of completed ValidationPath objects. This is mutated as
        results are found.

    :param failed_paths:
        A list of certvalidator.path.ValidationPath objects that failed due
        to no matching issuer before reaching a certificate from the CA
        certs list
    """
    # A path whose current head is a trusted CA cert is complete.
    if path.first.signature in self._ca_lookup:
        paths.append(path)
        return

    branched = False
    for candidate in self._possible_issuers(path.first):
        try:
            self._walk_issuers(path.copy().prepend(candidate), paths, failed_paths)
            branched = True
        except DuplicateCertificateError:
            # A certificate already present on the path would create a
            # loop; skip this candidate issuer.
            pass

    # No issuer produced a viable branch, so this path is a dead end.
    if not branched:
        failed_paths.append(path)
constant[
Recursively looks through the list of known certificates for the issuer
of the certificate specified, stopping once the certificate in question
is one contained within the CA certs list
:param path:
A ValidationPath object representing the current traversal of
possible paths
:param paths:
A list of completed ValidationPath objects. This is mutated as
results are found.
:param failed_paths:
A list of certvalidator.path.ValidationPath objects that failed due
to no matching issuer before reaching a certificate from the CA
certs list
]
if compare[name[path].first.signature in name[self]._ca_lookup] begin[:]
call[name[paths].append, parameter[name[path]]]
return[None]
variable[new_branches] assign[=] constant[0]
for taget[name[issuer]] in starred[call[name[self]._possible_issuers, parameter[name[path].first]]] begin[:]
<ast.Try object at 0x7da1b0da1120>
if <ast.UnaryOp object at 0x7da1b0da2020> begin[:]
call[name[failed_paths].append, parameter[name[path]]] | keyword[def] identifier[_walk_issuers] ( identifier[self] , identifier[path] , identifier[paths] , identifier[failed_paths] ):
literal[string]
keyword[if] identifier[path] . identifier[first] . identifier[signature] keyword[in] identifier[self] . identifier[_ca_lookup] :
identifier[paths] . identifier[append] ( identifier[path] )
keyword[return]
identifier[new_branches] = literal[int]
keyword[for] identifier[issuer] keyword[in] identifier[self] . identifier[_possible_issuers] ( identifier[path] . identifier[first] ):
keyword[try] :
identifier[self] . identifier[_walk_issuers] ( identifier[path] . identifier[copy] (). identifier[prepend] ( identifier[issuer] ), identifier[paths] , identifier[failed_paths] )
identifier[new_branches] += literal[int]
keyword[except] ( identifier[DuplicateCertificateError] ):
keyword[pass]
keyword[if] keyword[not] identifier[new_branches] :
identifier[failed_paths] . identifier[append] ( identifier[path] ) | def _walk_issuers(self, path, paths, failed_paths):
"""
Recursively looks through the list of known certificates for the issuer
of the certificate specified, stopping once the certificate in question
is one contained within the CA certs list
:param path:
A ValidationPath object representing the current traversal of
possible paths
:param paths:
A list of completed ValidationPath objects. This is mutated as
results are found.
:param failed_paths:
A list of certvalidator.path.ValidationPath objects that failed due
to no matching issuer before reaching a certificate from the CA
certs list
"""
if path.first.signature in self._ca_lookup:
paths.append(path)
return # depends on [control=['if'], data=[]]
new_branches = 0
for issuer in self._possible_issuers(path.first):
try:
self._walk_issuers(path.copy().prepend(issuer), paths, failed_paths)
new_branches += 1 # depends on [control=['try'], data=[]]
except DuplicateCertificateError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['issuer']]
if not new_branches:
failed_paths.append(path) # depends on [control=['if'], data=[]] |
def calculate_pertubations(self):
        """ experimental method to calculate finite difference parameter
        pertubations.  The perturbed values, together with bound-violation
        flags, are added to the Pst.parameter_data attribute as the
        "pertubation", "out_forward" and "out_back" columns.

        A perturbed value starts as ``parval1 + increment``; where that
        exceeds the upper bound ("parubnd") it is flipped to
        ``parval1 - increment``.  If the flipped value then falls below the
        lower bound ("parlbnd"), an Exception is raised because no valid
        pertubation exists for that parameter.

        Note
        ----
        user beware!
        """
        # Populate the "increment" column first (defined elsewhere on Pst).
        self.build_increments()
        # Forward pertubation: nominal value plus the finite-difference step.
        self.parameter_data.loc[:,"pertubation"] = \
            self.parameter_data.parval1 + \
            self.parameter_data.increment

        # Flag pertubations that overshoot the upper bound.
        self.parameter_data.loc[:,"out_forward"] = \
            self.parameter_data.loc[:,"pertubation"] > \
            self.parameter_data.loc[:,"parubnd"]

        # groups maps each flag value (True/False) to the row labels it covers.
        out_forward = self.parameter_data.groupby("out_forward").groups
        if True in out_forward:
            # Flip the offending rows to a backward difference instead.
            self.parameter_data.loc[out_forward[True],"pertubation"] = \
                self.parameter_data.loc[out_forward[True],"parval1"] - \
                self.parameter_data.loc[out_forward[True],"increment"]

        # After flipping, check whether any pertubation violates the lower bound.
        self.parameter_data.loc[:,"out_back"] = \
            self.parameter_data.loc[:,"pertubation"] < \
            self.parameter_data.loc[:,"parlbnd"]
        out_back = self.parameter_data.groupby("out_back").groups

        if True in out_back:
            still_out = out_back[True]
            print(self.parameter_data.loc[still_out,:],flush=True)
            # NOTE(review): ','.join(still_out) assumes the parameter_data
            # index labels are strings -- confirm, otherwise this raise
            # would itself fail with a TypeError.
            raise Exception("Pst.calculate_pertubations(): " +\
                            "can't calc pertubations for the following "+\
                            "Parameters {0}".format(','.join(still_out)))
constant[ experimental method to calculate finite difference parameter
pertubations. The pertubation values are added to the
Pst.parameter_data attribute
Note
----
user beware!
]
call[name[self].build_increments, parameter[]]
call[name[self].parameter_data.loc][tuple[[<ast.Slice object at 0x7da1b1d28940>, <ast.Constant object at 0x7da1b1d28610>]]] assign[=] binary_operation[name[self].parameter_data.parval1 + name[self].parameter_data.increment]
call[name[self].parameter_data.loc][tuple[[<ast.Slice object at 0x7da1b1d50910>, <ast.Constant object at 0x7da1b1d51210>]]] assign[=] compare[call[name[self].parameter_data.loc][tuple[[<ast.Slice object at 0x7da1b1d51c90>, <ast.Constant object at 0x7da1b1d50ac0>]]] greater[>] call[name[self].parameter_data.loc][tuple[[<ast.Slice object at 0x7da1b1d51d20>, <ast.Constant object at 0x7da1b1d506a0>]]]]
variable[out_forward] assign[=] call[name[self].parameter_data.groupby, parameter[constant[out_forward]]].groups
if compare[constant[True] in name[out_forward]] begin[:]
call[name[self].parameter_data.loc][tuple[[<ast.Subscript object at 0x7da1b1d51ba0>, <ast.Constant object at 0x7da1b1d502e0>]]] assign[=] binary_operation[call[name[self].parameter_data.loc][tuple[[<ast.Subscript object at 0x7da1b1d50670>, <ast.Constant object at 0x7da1b1d50bb0>]]] - call[name[self].parameter_data.loc][tuple[[<ast.Subscript object at 0x7da1b1d50b20>, <ast.Constant object at 0x7da1b1d50970>]]]]
call[name[self].parameter_data.loc][tuple[[<ast.Slice object at 0x7da1b1d50d60>, <ast.Constant object at 0x7da1b1d50610>]]] assign[=] compare[call[name[self].parameter_data.loc][tuple[[<ast.Slice object at 0x7da1b1d500d0>, <ast.Constant object at 0x7da1b1d514e0>]]] less[<] call[name[self].parameter_data.loc][tuple[[<ast.Slice object at 0x7da1b1d51510>, <ast.Constant object at 0x7da1b1d50eb0>]]]]
variable[out_back] assign[=] call[name[self].parameter_data.groupby, parameter[constant[out_back]]].groups
if compare[constant[True] in name[out_back]] begin[:]
variable[still_out] assign[=] call[name[out_back]][constant[True]]
call[name[print], parameter[call[name[self].parameter_data.loc][tuple[[<ast.Name object at 0x7da1b1d2d450>, <ast.Slice object at 0x7da1b1d2d330>]]]]]
<ast.Raise object at 0x7da1b1d2d3f0> | keyword[def] identifier[calculate_pertubations] ( identifier[self] ):
literal[string]
identifier[self] . identifier[build_increments] ()
identifier[self] . identifier[parameter_data] . identifier[loc] [:, literal[string] ]= identifier[self] . identifier[parameter_data] . identifier[parval1] + identifier[self] . identifier[parameter_data] . identifier[increment]
identifier[self] . identifier[parameter_data] . identifier[loc] [:, literal[string] ]= identifier[self] . identifier[parameter_data] . identifier[loc] [:, literal[string] ]> identifier[self] . identifier[parameter_data] . identifier[loc] [:, literal[string] ]
identifier[out_forward] = identifier[self] . identifier[parameter_data] . identifier[groupby] ( literal[string] ). identifier[groups]
keyword[if] keyword[True] keyword[in] identifier[out_forward] :
identifier[self] . identifier[parameter_data] . identifier[loc] [ identifier[out_forward] [ keyword[True] ], literal[string] ]= identifier[self] . identifier[parameter_data] . identifier[loc] [ identifier[out_forward] [ keyword[True] ], literal[string] ]- identifier[self] . identifier[parameter_data] . identifier[loc] [ identifier[out_forward] [ keyword[True] ], literal[string] ]
identifier[self] . identifier[parameter_data] . identifier[loc] [:, literal[string] ]= identifier[self] . identifier[parameter_data] . identifier[loc] [:, literal[string] ]< identifier[self] . identifier[parameter_data] . identifier[loc] [:, literal[string] ]
identifier[out_back] = identifier[self] . identifier[parameter_data] . identifier[groupby] ( literal[string] ). identifier[groups]
keyword[if] keyword[True] keyword[in] identifier[out_back] :
identifier[still_out] = identifier[out_back] [ keyword[True] ]
identifier[print] ( identifier[self] . identifier[parameter_data] . identifier[loc] [ identifier[still_out] ,:], identifier[flush] = keyword[True] )
keyword[raise] identifier[Exception] ( literal[string] + literal[string] + literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[still_out] ))) | def calculate_pertubations(self):
""" experimental method to calculate finite difference parameter
pertubations. The pertubation values are added to the
Pst.parameter_data attribute
Note
----
user beware!
"""
self.build_increments()
self.parameter_data.loc[:, 'pertubation'] = self.parameter_data.parval1 + self.parameter_data.increment
self.parameter_data.loc[:, 'out_forward'] = self.parameter_data.loc[:, 'pertubation'] > self.parameter_data.loc[:, 'parubnd']
out_forward = self.parameter_data.groupby('out_forward').groups
if True in out_forward:
self.parameter_data.loc[out_forward[True], 'pertubation'] = self.parameter_data.loc[out_forward[True], 'parval1'] - self.parameter_data.loc[out_forward[True], 'increment']
self.parameter_data.loc[:, 'out_back'] = self.parameter_data.loc[:, 'pertubation'] < self.parameter_data.loc[:, 'parlbnd']
out_back = self.parameter_data.groupby('out_back').groups
if True in out_back:
still_out = out_back[True]
print(self.parameter_data.loc[still_out, :], flush=True)
raise Exception('Pst.calculate_pertubations(): ' + "can't calc pertubations for the following " + 'Parameters {0}'.format(','.join(still_out))) # depends on [control=['if'], data=['out_back']] # depends on [control=['if'], data=['out_forward']] |
def iter_target_siblings_and_ancestors(self, target):
    """Produces an iterator over a target's siblings and ancestor lineage.

    :returns: A target iterator yielding the target and its siblings and then it ancestors from
      nearest to furthest removed.
    """

    def targets_registered_under(spec_path):
        # Yields every target registered at the given spec path, tolerating
        # paths with no addresses at all.  For example:
        #   a:a
        #   a/b/c:c
        # Here a/b contains no addresses, which is fine.
        try:
            for address in self._build_graph.inject_specs_closure([SiblingAddresses(spec_path)]):
                yield self._build_graph.get_target(address)
        except AddressLookupError:
            pass

    # Walk from the target's own directory up to the root, yielding the
    # targets found at each level (nearest level first).
    spec_path = target.address.spec_path
    while True:
        for found in targets_registered_under(spec_path):
            yield found
        parent_spec_path = os.path.dirname(spec_path)
        if parent_spec_path == spec_path:
            break
        spec_path = parent_spec_path
constant[Produces an iterator over a target's siblings and ancestor lineage.
:returns: A target iterator yielding the target and its siblings and then it ancestors from
nearest to furthest removed.
]
def function[iter_targets_in_spec_path, parameter[spec_path]]:
<ast.Try object at 0x7da1b1e68370>
def function[iter_siblings_and_ancestors, parameter[spec_path]]:
for taget[name[sibling]] in starred[call[name[iter_targets_in_spec_path], parameter[name[spec_path]]]] begin[:]
<ast.Yield object at 0x7da1b1e68af0>
variable[parent_spec_path] assign[=] call[name[os].path.dirname, parameter[name[spec_path]]]
if compare[name[parent_spec_path] not_equal[!=] name[spec_path]] begin[:]
for taget[name[parent]] in starred[call[name[iter_siblings_and_ancestors], parameter[name[parent_spec_path]]]] begin[:]
<ast.Yield object at 0x7da1b1e6acb0>
for taget[name[target]] in starred[call[name[iter_siblings_and_ancestors], parameter[name[target].address.spec_path]]] begin[:]
<ast.Yield object at 0x7da1b1e68ac0> | keyword[def] identifier[iter_target_siblings_and_ancestors] ( identifier[self] , identifier[target] ):
literal[string]
keyword[def] identifier[iter_targets_in_spec_path] ( identifier[spec_path] ):
keyword[try] :
identifier[siblings] = identifier[SiblingAddresses] ( identifier[spec_path] )
keyword[for] identifier[address] keyword[in] identifier[self] . identifier[_build_graph] . identifier[inject_specs_closure] ([ identifier[siblings] ]):
keyword[yield] identifier[self] . identifier[_build_graph] . identifier[get_target] ( identifier[address] )
keyword[except] identifier[AddressLookupError] :
keyword[pass]
keyword[def] identifier[iter_siblings_and_ancestors] ( identifier[spec_path] ):
keyword[for] identifier[sibling] keyword[in] identifier[iter_targets_in_spec_path] ( identifier[spec_path] ):
keyword[yield] identifier[sibling]
identifier[parent_spec_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[spec_path] )
keyword[if] identifier[parent_spec_path] != identifier[spec_path] :
keyword[for] identifier[parent] keyword[in] identifier[iter_siblings_and_ancestors] ( identifier[parent_spec_path] ):
keyword[yield] identifier[parent]
keyword[for] identifier[target] keyword[in] identifier[iter_siblings_and_ancestors] ( identifier[target] . identifier[address] . identifier[spec_path] ):
keyword[yield] identifier[target] | def iter_target_siblings_and_ancestors(self, target):
"""Produces an iterator over a target's siblings and ancestor lineage.
:returns: A target iterator yielding the target and its siblings and then it ancestors from
nearest to furthest removed.
"""
def iter_targets_in_spec_path(spec_path):
try:
siblings = SiblingAddresses(spec_path)
for address in self._build_graph.inject_specs_closure([siblings]):
yield self._build_graph.get_target(address) # depends on [control=['for'], data=['address']] # depends on [control=['try'], data=[]]
except AddressLookupError:
# A spec path may not have any addresses registered under it and that's ok.
# For example:
# a:a
# a/b/c:c
#
# Here a/b contains no addresses.
pass # depends on [control=['except'], data=[]]
def iter_siblings_and_ancestors(spec_path):
for sibling in iter_targets_in_spec_path(spec_path):
yield sibling # depends on [control=['for'], data=['sibling']]
parent_spec_path = os.path.dirname(spec_path)
if parent_spec_path != spec_path:
for parent in iter_siblings_and_ancestors(parent_spec_path):
yield parent # depends on [control=['for'], data=['parent']] # depends on [control=['if'], data=['parent_spec_path']]
for target in iter_siblings_and_ancestors(target.address.spec_path):
yield target # depends on [control=['for'], data=['target']] |
async def restart_stream(self):
        """
        Restart the stream on error.

        Releases the current response, sleeps ``self._error_timeout``
        seconds, reconnects and clears the reconnecting flag.

        :returns: ``{'stream_restart': True}`` so callers can tell that a
            restart occurred.
        """
        # Free the previous HTTP response/connection before reconnecting.
        await self.response.release()
        # Back off before reconnecting so a failing endpoint is not hammered.
        await asyncio.sleep(self._error_timeout)
        await self.connect()
        logger.info("Reconnected to the stream")
        # NOTE(review): assumes a caller set self._reconnecting = True before
        # invoking this coroutine -- confirm against the calling code.
        self._reconnecting = False
        return {'stream_restart': True}
literal[string]
keyword[await] identifier[self] . identifier[response] . identifier[release] ()
keyword[await] identifier[asyncio] . identifier[sleep] ( identifier[self] . identifier[_error_timeout] )
keyword[await] identifier[self] . identifier[connect] ()
identifier[logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[_reconnecting] = keyword[False]
keyword[return] { literal[string] : keyword[True] } | async def restart_stream(self):
"""
Restart the stream on error
"""
await self.response.release()
await asyncio.sleep(self._error_timeout)
await self.connect()
logger.info('Reconnected to the stream')
self._reconnecting = False
return {'stream_restart': True} |
def use_internal_state(self):
    """Temporarily install this object's private RNG state.

    Saves the module-level ``random`` state, switches to
    ``self._random_state`` for the duration of the ``with`` block, then
    captures the (possibly advanced) internal state back into
    ``self._random_state`` and restores the caller's original state.

    Fix: the capture/restore now runs in a ``finally`` block, so the
    global RNG state is no longer corrupted (and the internal state no
    longer lost) when the body of the ``with`` block raises.
    """
    old_state = random.getstate()
    random.setstate(self._random_state)
    try:
        yield
    finally:
        # Persist however far the internal stream advanced, then put the
        # caller's state back -- even if the with-body raised.
        self._random_state = random.getstate()
        random.setstate(old_state)
constant[Use a specific RNG state.]
variable[old_state] assign[=] call[name[random].getstate, parameter[]]
call[name[random].setstate, parameter[name[self]._random_state]]
<ast.Yield object at 0x7da18eb557b0>
name[self]._random_state assign[=] call[name[random].getstate, parameter[]]
call[name[random].setstate, parameter[name[old_state]]] | keyword[def] identifier[use_internal_state] ( identifier[self] ):
literal[string]
identifier[old_state] = identifier[random] . identifier[getstate] ()
identifier[random] . identifier[setstate] ( identifier[self] . identifier[_random_state] )
keyword[yield]
identifier[self] . identifier[_random_state] = identifier[random] . identifier[getstate] ()
identifier[random] . identifier[setstate] ( identifier[old_state] ) | def use_internal_state(self):
"""Use a specific RNG state."""
old_state = random.getstate()
random.setstate(self._random_state)
yield
self._random_state = random.getstate()
random.setstate(old_state) |
def stop_jobs(self, job_ids):
    """Stops the jobs in the grid.

    Sends ``qdel`` for every job in ``job_ids`` that is still active
    (executing, queued or waiting) and re-submits each job's database
    record, committing the session at the end.

    Fix: the lock is now released in a ``finally`` block, so a failure in
    ``qdel``, ``submit`` or the commit no longer leaks the lock.

    :param job_ids: ids of the jobs to stop (passed to ``self.get_jobs``).
    """
    self.lock()
    try:
        for job in self.get_jobs(job_ids):
            # Only jobs still active on the grid need a qdel.
            if job.status in ('executing', 'queued', 'waiting'):
                qdel(job.id, context=self.context)
                logger.info("Stopped job '%s' in the SGE grid." % job)
            # NOTE(review): submit() runs for every selected job, not just
            # the stopped ones -- presumably it resets the record for
            # resubmission; confirm against the Job model.
            job.submit()
        self.session.commit()
    finally:
        # Always release the lock, even on error.
        self.unlock()
constant[Stops the jobs in the grid.]
call[name[self].lock, parameter[]]
variable[jobs] assign[=] call[name[self].get_jobs, parameter[name[job_ids]]]
for taget[name[job]] in starred[name[jobs]] begin[:]
if compare[name[job].status in tuple[[<ast.Constant object at 0x7da18bcc8880>, <ast.Constant object at 0x7da18bccb700>, <ast.Constant object at 0x7da18bccaa40>]]] begin[:]
call[name[qdel], parameter[name[job].id]]
call[name[logger].info, parameter[binary_operation[constant[Stopped job '%s' in the SGE grid.] <ast.Mod object at 0x7da2590d6920> name[job]]]]
call[name[job].submit, parameter[]]
call[name[self].session.commit, parameter[]]
call[name[self].unlock, parameter[]] | keyword[def] identifier[stop_jobs] ( identifier[self] , identifier[job_ids] ):
literal[string]
identifier[self] . identifier[lock] ()
identifier[jobs] = identifier[self] . identifier[get_jobs] ( identifier[job_ids] )
keyword[for] identifier[job] keyword[in] identifier[jobs] :
keyword[if] identifier[job] . identifier[status] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[qdel] ( identifier[job] . identifier[id] , identifier[context] = identifier[self] . identifier[context] )
identifier[logger] . identifier[info] ( literal[string] % identifier[job] )
identifier[job] . identifier[submit] ()
identifier[self] . identifier[session] . identifier[commit] ()
identifier[self] . identifier[unlock] () | def stop_jobs(self, job_ids):
"""Stops the jobs in the grid."""
self.lock()
jobs = self.get_jobs(job_ids)
for job in jobs:
if job.status in ('executing', 'queued', 'waiting'):
qdel(job.id, context=self.context)
logger.info("Stopped job '%s' in the SGE grid." % job)
job.submit() # depends on [control=['if'], data=[]]
self.session.commit() # depends on [control=['for'], data=['job']]
self.unlock() |
def expand_file_arguments():
    """ Any argument starting with "@" gets replaced with all values read from a text file.
    Text file arguments can be split by newline or by space.
    Values are added "as-is", as if they were specified in this order on the command line.
    """
    result = []
    did_expand = False
    for token in sys.argv:
        if not token.startswith("@"):
            result.append(token)
            continue
        # "@file" token: splice in the file's contents, shell-style split.
        did_expand = True
        with open(token[1:], "r") as handle:
            for text_line in handle.readlines():
                result.extend(shlex.split(text_line))
    if did_expand:
        # Echo the fully expanded command line for the user's benefit.
        print("esptool.py %s" % (" ".join(result[1:])))
    sys.argv = result
constant[ Any argument starting with "@" gets replaced with all values read from a text file.
Text file arguments can be split by newline or by space.
Values are added "as-is", as if they were specified in this order on the command line.
]
variable[new_args] assign[=] list[[]]
variable[expanded] assign[=] constant[False]
for taget[name[arg]] in starred[name[sys].argv] begin[:]
if call[name[arg].startswith, parameter[constant[@]]] begin[:]
variable[expanded] assign[=] constant[True]
with call[name[open], parameter[call[name[arg]][<ast.Slice object at 0x7da204623040>], constant[r]]] begin[:]
for taget[name[line]] in starred[call[name[f].readlines, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da2046221d0>
if name[expanded] begin[:]
call[name[print], parameter[binary_operation[constant[esptool.py %s] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[call[name[new_args]][<ast.Slice object at 0x7da204623130>]]]]]]
name[sys].argv assign[=] name[new_args] | keyword[def] identifier[expand_file_arguments] ():
literal[string]
identifier[new_args] =[]
identifier[expanded] = keyword[False]
keyword[for] identifier[arg] keyword[in] identifier[sys] . identifier[argv] :
keyword[if] identifier[arg] . identifier[startswith] ( literal[string] ):
identifier[expanded] = keyword[True]
keyword[with] identifier[open] ( identifier[arg] [ literal[int] :], literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] . identifier[readlines] ():
identifier[new_args] += identifier[shlex] . identifier[split] ( identifier[line] )
keyword[else] :
identifier[new_args] . identifier[append] ( identifier[arg] )
keyword[if] identifier[expanded] :
identifier[print] ( literal[string] %( literal[string] . identifier[join] ( identifier[new_args] [ literal[int] :])))
identifier[sys] . identifier[argv] = identifier[new_args] | def expand_file_arguments():
""" Any argument starting with "@" gets replaced with all values read from a text file.
Text file arguments can be split by newline or by space.
Values are added "as-is", as if they were specified in this order on the command line.
"""
new_args = []
expanded = False
for arg in sys.argv:
if arg.startswith('@'):
expanded = True
with open(arg[1:], 'r') as f:
for line in f.readlines():
new_args += shlex.split(line) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
new_args.append(arg) # depends on [control=['for'], data=['arg']]
if expanded:
print('esptool.py %s' % ' '.join(new_args[1:]))
sys.argv = new_args # depends on [control=['if'], data=[]] |
def calc_check_digit(digits):
    """Calculate and return the GS1 check digit.

    Walking the digits from the right, positions 0, 2, 4, ... get weight 3
    and the rest weight 1; the check digit brings the weighted sum up to a
    multiple of 10.
    """
    total = 0
    for position, ch in enumerate(reversed(digits)):
        weight = 3 if position % 2 == 0 else 1
        total += weight * int(ch)
    return str(-total % 10)
constant[Calculate and return the GS1 check digit.]
variable[ints] assign[=] <ast.ListComp object at 0x7da2044c1780>
variable[l] assign[=] call[name[len], parameter[name[ints]]]
variable[odds] assign[=] call[name[slice], parameter[binary_operation[binary_operation[name[l] - constant[1]] <ast.Mod object at 0x7da2590d6920> constant[2]], name[l], constant[2]]]
variable[even] assign[=] call[name[slice], parameter[binary_operation[name[l] <ast.Mod object at 0x7da2590d6920> constant[2]], name[l], constant[2]]]
variable[checksum] assign[=] binary_operation[binary_operation[constant[3] * call[name[sum], parameter[call[name[ints]][name[odds]]]]] + call[name[sum], parameter[call[name[ints]][name[even]]]]]
return[call[name[str], parameter[binary_operation[<ast.UnaryOp object at 0x7da18eb572b0> <ast.Mod object at 0x7da2590d6920> constant[10]]]]] | keyword[def] identifier[calc_check_digit] ( identifier[digits] ):
literal[string]
identifier[ints] =[ identifier[int] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[digits] ]
identifier[l] = identifier[len] ( identifier[ints] )
identifier[odds] = identifier[slice] (( identifier[l] - literal[int] )% literal[int] , identifier[l] , literal[int] )
identifier[even] = identifier[slice] ( identifier[l] % literal[int] , identifier[l] , literal[int] )
identifier[checksum] = literal[int] * identifier[sum] ( identifier[ints] [ identifier[odds] ])+ identifier[sum] ( identifier[ints] [ identifier[even] ])
keyword[return] identifier[str] (- identifier[checksum] % literal[int] ) | def calc_check_digit(digits):
"""Calculate and return the GS1 check digit."""
ints = [int(d) for d in digits]
l = len(ints)
odds = slice((l - 1) % 2, l, 2)
even = slice(l % 2, l, 2)
checksum = 3 * sum(ints[odds]) + sum(ints[even])
return str(-checksum % 10) |
def _clean_index(key, length):
    """
    Validate a cell index and normalize it to a non-negative offset.

    >>> _clean_index(0, 10)
    0
    >>> _clean_index(-10, 10)
    0
    >>> _clean_index(10, 10)
    Traceback (most recent call last):
    ...
    IndexError: Cell index out of range.
    >>> _clean_index(-11, 10)
    Traceback (most recent call last):
    ...
    IndexError: Cell index out of range.
    >>> _clean_index(None, 10)
    Traceback (most recent call last):
    ...
    TypeError: Cell indices must be integers, NoneType given.
    """
    if not isinstance(key, integer_types):
        raise TypeError('Cell indices must be integers, %s given.' % type(key).__name__)
    if -length <= key < length:
        # One modulo handles both cases: negatives in [-length, 0) wrap
        # to [0, length), non-negatives pass through unchanged.
        return key % length
    raise IndexError('Cell index out of range.')
constant[
Validates and normalizes a cell range index.
>>> _clean_index(0, 10)
0
>>> _clean_index(-10, 10)
0
>>> _clean_index(10, 10)
Traceback (most recent call last):
...
IndexError: Cell index out of range.
>>> _clean_index(-11, 10)
Traceback (most recent call last):
...
IndexError: Cell index out of range.
>>> _clean_index(None, 10)
Traceback (most recent call last):
...
TypeError: Cell indices must be integers, NoneType given.
]
if <ast.UnaryOp object at 0x7da1b0c53f70> begin[:]
<ast.Raise object at 0x7da1b0b81e70>
if compare[<ast.UnaryOp object at 0x7da1b0e26230> less_or_equal[<=] name[key]] begin[:]
return[binary_operation[name[key] + name[length]]] | keyword[def] identifier[_clean_index] ( identifier[key] , identifier[length] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[key] , identifier[integer_types] ):
keyword[raise] identifier[TypeError] ( literal[string] % identifier[type] ( identifier[key] ). identifier[__name__] )
keyword[if] - identifier[length] <= identifier[key] < literal[int] :
keyword[return] identifier[key] + identifier[length]
keyword[elif] literal[int] <= identifier[key] < identifier[length] :
keyword[return] identifier[key]
keyword[else] :
keyword[raise] identifier[IndexError] ( literal[string] ) | def _clean_index(key, length):
"""
Validates and normalizes a cell range index.
>>> _clean_index(0, 10)
0
>>> _clean_index(-10, 10)
0
>>> _clean_index(10, 10)
Traceback (most recent call last):
...
IndexError: Cell index out of range.
>>> _clean_index(-11, 10)
Traceback (most recent call last):
...
IndexError: Cell index out of range.
>>> _clean_index(None, 10)
Traceback (most recent call last):
...
TypeError: Cell indices must be integers, NoneType given.
"""
if not isinstance(key, integer_types):
raise TypeError('Cell indices must be integers, %s given.' % type(key).__name__) # depends on [control=['if'], data=[]]
if -length <= key < 0:
return key + length # depends on [control=['if'], data=['key']]
elif 0 <= key < length:
return key # depends on [control=['if'], data=['key']]
else:
raise IndexError('Cell index out of range.') |
def execute(query, auth=None, client=urllib_request.build_opener()):
    """Execute a query, returning its result

    Parameters
    ----------
    query: Query[T]
        The query to resolve
    auth: ~typing.Tuple[str, str] \
        or ~typing.Callable[[Request], Request] or None
        This may be:

        * A (username, password)-tuple for basic authentication
        * A callable to authenticate requests.
        * ``None`` (no authentication)
    client
        The HTTP client to use.
        Its type must have been registered
        with :func:`~snug.clients.send`.
        If not given, the built-in :mod:`urllib` module is used.

    Returns
    -------
    T
        the query result
    """
    # Queries may customize execution via a __execute__ classmethod;
    # otherwise fall back to the module default.
    executor = getattr(type(query), '__execute__', _default_execute_method)
    authenticator = _make_auth(auth)
    return executor(query, client, authenticator)
constant[Execute a query, returning its result
Parameters
----------
query: Query[T]
The query to resolve
auth: ~typing.Tuple[str, str] or ~typing.Callable[[Request], Request] or None
This may be:
* A (username, password)-tuple for basic authentication
* A callable to authenticate requests.
* ``None`` (no authentication)
client
The HTTP client to use.
Its type must have been registered
with :func:`~snug.clients.send`.
If not given, the built-in :mod:`urllib` module is used.
Returns
-------
T
the query result
]
variable[exec_fn] assign[=] call[name[getattr], parameter[call[name[type], parameter[name[query]]], constant[__execute__], name[_default_execute_method]]]
return[call[name[exec_fn], parameter[name[query], name[client], call[name[_make_auth], parameter[name[auth]]]]]] | keyword[def] identifier[execute] ( identifier[query] , identifier[auth] = keyword[None] , identifier[client] = identifier[urllib_request] . identifier[build_opener] ()):
literal[string]
identifier[exec_fn] = identifier[getattr] ( identifier[type] ( identifier[query] ), literal[string] , identifier[_default_execute_method] )
keyword[return] identifier[exec_fn] ( identifier[query] , identifier[client] , identifier[_make_auth] ( identifier[auth] )) | def execute(query, auth=None, client=urllib_request.build_opener()):
"""Execute a query, returning its result
Parameters
----------
query: Query[T]
The query to resolve
auth: ~typing.Tuple[str, str] or ~typing.Callable[[Request], Request] or None
This may be:
* A (username, password)-tuple for basic authentication
* A callable to authenticate requests.
* ``None`` (no authentication)
client
The HTTP client to use.
Its type must have been registered
with :func:`~snug.clients.send`.
If not given, the built-in :mod:`urllib` module is used.
Returns
-------
T
the query result
"""
exec_fn = getattr(type(query), '__execute__', _default_execute_method)
return exec_fn(query, client, _make_auth(auth)) |
def lookup_cert(self, key):
    """
    Return the certificate configured for *key*, or ``None`` when
    no consumers are configured or the consumer has no cert.
    """
    if not self.consumers:
        log.critical(("No consumers defined in settings."
                      "Have you created a configuration file?"))
        return None
    entry = self.consumers.get(key)
    if not entry:
        log.info("Did not find consumer, using key: %s ", key)
        return None
    return entry.get('cert', None)
constant[
Search through keys
]
if <ast.UnaryOp object at 0x7da1b1598e20> begin[:]
call[name[log].critical, parameter[constant[No consumers defined in settings.Have you created a configuration file?]]]
return[constant[None]]
variable[consumer] assign[=] call[name[self].consumers.get, parameter[name[key]]]
if <ast.UnaryOp object at 0x7da1b15992a0> begin[:]
call[name[log].info, parameter[constant[Did not find consumer, using key: %s ], name[key]]]
return[constant[None]]
variable[cert] assign[=] call[name[consumer].get, parameter[constant[cert], constant[None]]]
return[name[cert]] | keyword[def] identifier[lookup_cert] ( identifier[self] , identifier[key] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[consumers] :
identifier[log] . identifier[critical] (( literal[string]
literal[string] ))
keyword[return] keyword[None]
identifier[consumer] = identifier[self] . identifier[consumers] . identifier[get] ( identifier[key] )
keyword[if] keyword[not] identifier[consumer] :
identifier[log] . identifier[info] ( literal[string] , identifier[key] )
keyword[return] keyword[None]
identifier[cert] = identifier[consumer] . identifier[get] ( literal[string] , keyword[None] )
keyword[return] identifier[cert] | def lookup_cert(self, key):
"""
Search through keys
"""
if not self.consumers:
log.critical('No consumers defined in settings.Have you created a configuration file?')
return None # depends on [control=['if'], data=[]]
consumer = self.consumers.get(key)
if not consumer:
log.info('Did not find consumer, using key: %s ', key)
return None # depends on [control=['if'], data=[]]
cert = consumer.get('cert', None)
return cert |
def complete_variable(text):
    '''complete a MAVLink variable or expression'''
    if text == '':
        return list(rline_mpstate.status.msgs.keys())
    # A trailing ":2" selects the second link; strip it while matching and
    # re-append it to every completion we return.
    if text.endswith(":2"):
        suffix = ":2"
        text = text[:-2]
    else:
        suffix = ''
    try:
        # If the text already evaluates cleanly it is a complete expression.
        if mavutil.evaluate_expression(text, rline_mpstate.status.msgs) is not None:
            return [text + suffix]
    except Exception:
        pass
    # MSGTYPE.field form: complete the field name.
    try:
        m1 = re.match("^(.*?)([A-Z0-9][A-Z0-9_]*)[.]([A-Za-z0-9_]*)$", text)
    except Exception:
        return []
    if m1 is not None:
        prefix = m1.group(1)
        mtype = m1.group(2)
        fname = m1.group(3)
        if mtype in rline_mpstate.status.msgs:
            return [prefix + mtype + '.' + f + suffix
                    for f in rline_mpstate.status.msgs[mtype].get_fieldnames()
                    if f.startswith(fname)]
        return []
    # Bare MSGTYPE prefix: complete the message type name.
    try:
        m2 = re.match("^(.*?)([A-Z0-9][A-Z0-9_]*)$", text)
    except Exception:
        return []
    if m2 is None:
        # BUGFIX: text with no message-name-like suffix (e.g. all
        # lower-case) previously fell through to m2.group(1) and raised
        # AttributeError; there is nothing to complete, so return no matches.
        return []
    prefix = m2.group(1)
    mtype = m2.group(2)
    return [prefix + k + suffix
            for k in rline_mpstate.status.msgs.keys()
            if k.startswith(mtype)]
constant[complete a MAVLink variable or expression]
if compare[name[text] equal[==] constant[]] begin[:]
return[call[name[list], parameter[call[name[rline_mpstate].status.msgs.keys, parameter[]]]]]
if call[name[text].endswith, parameter[constant[:2]]] begin[:]
variable[suffix] assign[=] constant[:2]
variable[text] assign[=] call[name[text]][<ast.Slice object at 0x7da20c76dfc0>]
<ast.Try object at 0x7da20c76d810>
<ast.Try object at 0x7da20c76e200>
if compare[name[m1] is_not constant[None]] begin[:]
variable[prefix] assign[=] call[name[m1].group, parameter[constant[1]]]
variable[mtype] assign[=] call[name[m1].group, parameter[constant[2]]]
variable[fname] assign[=] call[name[m1].group, parameter[constant[3]]]
if compare[name[mtype] in name[rline_mpstate].status.msgs] begin[:]
variable[ret] assign[=] list[[]]
for taget[name[f]] in starred[call[call[name[rline_mpstate].status.msgs][name[mtype]].get_fieldnames, parameter[]]] begin[:]
if call[name[f].startswith, parameter[name[fname]]] begin[:]
call[name[ret].append, parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[prefix] + name[mtype]] + constant[.]] + name[f]] + name[suffix]]]]
return[name[ret]]
return[list[[]]]
<ast.Try object at 0x7da20c76d9f0>
variable[prefix] assign[=] call[name[m2].group, parameter[constant[1]]]
variable[mtype] assign[=] call[name[m2].group, parameter[constant[2]]]
variable[ret] assign[=] list[[]]
for taget[name[k]] in starred[call[name[list], parameter[call[name[rline_mpstate].status.msgs.keys, parameter[]]]]] begin[:]
if call[name[k].startswith, parameter[name[mtype]]] begin[:]
call[name[ret].append, parameter[binary_operation[binary_operation[name[prefix] + name[k]] + name[suffix]]]]
return[name[ret]] | keyword[def] identifier[complete_variable] ( identifier[text] ):
literal[string]
keyword[if] identifier[text] == literal[string] :
keyword[return] identifier[list] ( identifier[rline_mpstate] . identifier[status] . identifier[msgs] . identifier[keys] ())
keyword[if] identifier[text] . identifier[endswith] ( literal[string] ):
identifier[suffix] = literal[string]
identifier[text] = identifier[text] [:- literal[int] ]
keyword[else] :
identifier[suffix] = literal[string]
keyword[try] :
keyword[if] identifier[mavutil] . identifier[evaluate_expression] ( identifier[text] , identifier[rline_mpstate] . identifier[status] . identifier[msgs] ) keyword[is] keyword[not] keyword[None] :
keyword[return] [ identifier[text] + identifier[suffix] ]
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[pass]
keyword[try] :
identifier[m1] = identifier[re] . identifier[match] ( literal[string] , identifier[text] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[return] []
keyword[if] identifier[m1] keyword[is] keyword[not] keyword[None] :
identifier[prefix] = identifier[m1] . identifier[group] ( literal[int] )
identifier[mtype] = identifier[m1] . identifier[group] ( literal[int] )
identifier[fname] = identifier[m1] . identifier[group] ( literal[int] )
keyword[if] identifier[mtype] keyword[in] identifier[rline_mpstate] . identifier[status] . identifier[msgs] :
identifier[ret] =[]
keyword[for] identifier[f] keyword[in] identifier[rline_mpstate] . identifier[status] . identifier[msgs] [ identifier[mtype] ]. identifier[get_fieldnames] ():
keyword[if] identifier[f] . identifier[startswith] ( identifier[fname] ):
identifier[ret] . identifier[append] ( identifier[prefix] + identifier[mtype] + literal[string] + identifier[f] + identifier[suffix] )
keyword[return] identifier[ret]
keyword[return] []
keyword[try] :
identifier[m2] = identifier[re] . identifier[match] ( literal[string] , identifier[text] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[return] []
identifier[prefix] = identifier[m2] . identifier[group] ( literal[int] )
identifier[mtype] = identifier[m2] . identifier[group] ( literal[int] )
identifier[ret] =[]
keyword[for] identifier[k] keyword[in] identifier[list] ( identifier[rline_mpstate] . identifier[status] . identifier[msgs] . identifier[keys] ()):
keyword[if] identifier[k] . identifier[startswith] ( identifier[mtype] ):
identifier[ret] . identifier[append] ( identifier[prefix] + identifier[k] + identifier[suffix] )
keyword[return] identifier[ret] | def complete_variable(text):
"""complete a MAVLink variable or expression"""
if text == '':
return list(rline_mpstate.status.msgs.keys()) # depends on [control=['if'], data=[]]
if text.endswith(':2'):
suffix = ':2'
text = text[:-2] # depends on [control=['if'], data=[]]
else:
suffix = ''
try:
if mavutil.evaluate_expression(text, rline_mpstate.status.msgs) is not None:
return [text + suffix] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as ex:
pass # depends on [control=['except'], data=[]]
try:
m1 = re.match('^(.*?)([A-Z0-9][A-Z0-9_]*)[.]([A-Za-z0-9_]*)$', text) # depends on [control=['try'], data=[]]
except Exception as ex:
return [] # depends on [control=['except'], data=[]]
if m1 is not None:
prefix = m1.group(1)
mtype = m1.group(2)
fname = m1.group(3)
if mtype in rline_mpstate.status.msgs:
ret = []
for f in rline_mpstate.status.msgs[mtype].get_fieldnames():
if f.startswith(fname):
ret.append(prefix + mtype + '.' + f + suffix) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
return ret # depends on [control=['if'], data=['mtype']]
return [] # depends on [control=['if'], data=['m1']]
try:
m2 = re.match('^(.*?)([A-Z0-9][A-Z0-9_]*)$', text) # depends on [control=['try'], data=[]]
except Exception as ex:
return [] # depends on [control=['except'], data=[]]
prefix = m2.group(1)
mtype = m2.group(2)
ret = []
for k in list(rline_mpstate.status.msgs.keys()):
if k.startswith(mtype):
ret.append(prefix + k + suffix) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
return ret |
def export(datastore_key, calc_id=-1, exports='csv', export_dir='.'):
    """
    Export an output from the datastore.
    """
    dstore = util.read(calc_id)
    parent = dstore['oqparam'].hazard_calculation_id
    if parent:
        # Child calculations need the parent datastore attached.
        dstore.parent = util.read(parent)
    dstore.export_dir = export_dir
    with performance.Monitor('export', measuremem=True) as monitor:
        for fmt in exports.split(','):
            fnames = export_((datastore_key, fmt), dstore)
            total = sum(map(os.path.getsize, fnames))
            print('Exported %s in %s' % (general.humansize(total), fnames))
    # Only report timing when the export took a noticeable amount of time.
    if monitor.duration > 1:
        print(monitor)
    dstore.close()
constant[
Export an output from the datastore.
]
variable[dstore] assign[=] call[name[util].read, parameter[name[calc_id]]]
variable[parent_id] assign[=] call[name[dstore]][constant[oqparam]].hazard_calculation_id
if name[parent_id] begin[:]
name[dstore].parent assign[=] call[name[util].read, parameter[name[parent_id]]]
name[dstore].export_dir assign[=] name[export_dir]
with call[name[performance].Monitor, parameter[constant[export]]] begin[:]
for taget[name[fmt]] in starred[call[name[exports].split, parameter[constant[,]]]] begin[:]
variable[fnames] assign[=] call[name[export_], parameter[tuple[[<ast.Name object at 0x7da18bcc81c0>, <ast.Name object at 0x7da18bcc9b40>]], name[dstore]]]
variable[nbytes] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da18bcc9c30>]]
call[name[print], parameter[binary_operation[constant[Exported %s in %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18bcc85e0>, <ast.Name object at 0x7da18bccbca0>]]]]]
if compare[name[mon].duration greater[>] constant[1]] begin[:]
call[name[print], parameter[name[mon]]]
call[name[dstore].close, parameter[]] | keyword[def] identifier[export] ( identifier[datastore_key] , identifier[calc_id] =- literal[int] , identifier[exports] = literal[string] , identifier[export_dir] = literal[string] ):
literal[string]
identifier[dstore] = identifier[util] . identifier[read] ( identifier[calc_id] )
identifier[parent_id] = identifier[dstore] [ literal[string] ]. identifier[hazard_calculation_id]
keyword[if] identifier[parent_id] :
identifier[dstore] . identifier[parent] = identifier[util] . identifier[read] ( identifier[parent_id] )
identifier[dstore] . identifier[export_dir] = identifier[export_dir]
keyword[with] identifier[performance] . identifier[Monitor] ( literal[string] , identifier[measuremem] = keyword[True] ) keyword[as] identifier[mon] :
keyword[for] identifier[fmt] keyword[in] identifier[exports] . identifier[split] ( literal[string] ):
identifier[fnames] = identifier[export_] (( identifier[datastore_key] , identifier[fmt] ), identifier[dstore] )
identifier[nbytes] = identifier[sum] ( identifier[os] . identifier[path] . identifier[getsize] ( identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[fnames] )
identifier[print] ( literal[string] %( identifier[general] . identifier[humansize] ( identifier[nbytes] ), identifier[fnames] ))
keyword[if] identifier[mon] . identifier[duration] > literal[int] :
identifier[print] ( identifier[mon] )
identifier[dstore] . identifier[close] () | def export(datastore_key, calc_id=-1, exports='csv', export_dir='.'):
"""
Export an output from the datastore.
"""
dstore = util.read(calc_id)
parent_id = dstore['oqparam'].hazard_calculation_id
if parent_id:
dstore.parent = util.read(parent_id) # depends on [control=['if'], data=[]]
dstore.export_dir = export_dir
with performance.Monitor('export', measuremem=True) as mon:
for fmt in exports.split(','):
fnames = export_((datastore_key, fmt), dstore)
nbytes = sum((os.path.getsize(f) for f in fnames))
print('Exported %s in %s' % (general.humansize(nbytes), fnames)) # depends on [control=['for'], data=['fmt']] # depends on [control=['with'], data=[]]
if mon.duration > 1:
print(mon) # depends on [control=['if'], data=[]]
dstore.close() |
def getattr_sdk(attr, name):
    """
    Filter SDK attributes

    Args:
        attr(attribute): Attribute as returned by :func:`getattr`.
        name(str): Attribute name.

    Returns:
        `attr` if passed.
    """
    # Only routines explicitly tagged with SDK metadata are exposed;
    # everything else behaves as if the attribute did not exist.
    if inspect.isroutine(attr) and hasattr(attr, '_sdkmeta'):
        return attr
    raise AttributeError(name)
constant[
Filter SDK attributes
Args:
attr(attribute): Attribute as returned by :func:`getattr`.
name(str): Attribute name.
Returns:
`attr` if passed.
]
if call[name[inspect].isroutine, parameter[name[attr]]] begin[:]
if call[name[hasattr], parameter[name[attr], constant[_sdkmeta]]] begin[:]
return[name[attr]]
<ast.Raise object at 0x7da18f721300> | keyword[def] identifier[getattr_sdk] ( identifier[attr] , identifier[name] ):
literal[string]
keyword[if] identifier[inspect] . identifier[isroutine] ( identifier[attr] ):
keyword[if] identifier[hasattr] ( identifier[attr] , literal[string] ):
keyword[return] identifier[attr]
keyword[raise] identifier[AttributeError] ( identifier[name] ) | def getattr_sdk(attr, name):
"""
Filter SDK attributes
Args:
attr(attribute): Attribute as returned by :func:`getattr`.
name(str): Attribute name.
Returns:
`attr` if passed.
"""
if inspect.isroutine(attr):
if hasattr(attr, '_sdkmeta'):
return attr # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
raise AttributeError(name) |
def saveLogs(self, filename):
    """Dump this object's logs to *filename* as a pickle.

    Uses a context manager so the file handle is closed even when
    pickling raises (the previous version leaked the handle on error).
    """
    with open(filename, 'wb') as f:
        cPickle.dump(self.logs, f)
constant[dumps logs into a nice pickle]
variable[f] assign[=] call[name[open], parameter[name[filename], constant[wb]]]
call[name[cPickle].dump, parameter[name[self].logs, name[f]]]
call[name[f].close, parameter[]] | keyword[def] identifier[saveLogs] ( identifier[self] , identifier[filename] ):
literal[string]
identifier[f] = identifier[open] ( identifier[filename] , literal[string] )
identifier[cPickle] . identifier[dump] ( identifier[self] . identifier[logs] , identifier[f] )
identifier[f] . identifier[close] () | def saveLogs(self, filename):
"""dumps logs into a nice pickle"""
f = open(filename, 'wb')
cPickle.dump(self.logs, f)
f.close() |
def delete_jobs(self, user_ids, job_ids, task_ids, labels,
                create_time_min=None, create_time_max=None):
    """Kills the operations associated with the specified job or job.task.

    Args:
      user_ids: List of user ids who "own" the job(s) to cancel.
      job_ids: List of job_ids to cancel.
      task_ids: List of task-ids to cancel.
      labels: List of LabelParam, each must match the job(s) to be canceled.
      create_time_min: a timezone-aware datetime value for the earliest create
        time of a task, inclusive.
      create_time_max: a timezone-aware datetime value for the most recent
        create time of a task, inclusive.

    Returns:
      A list of tasks canceled and a list of error messages.
    """
    # Only RUNNING operations are candidates for cancellation.
    running = list(
        self.lookup_job_tasks(
            {'RUNNING'},
            user_ids=user_ids,
            job_ids=job_ids,
            task_ids=task_ids,
            labels=labels,
            create_time_min=create_time_min,
            create_time_max=create_time_max))
    print('Found %d tasks to delete.' % len(running))
    # Cancel them in a single batched HTTP request.
    return google_base.cancel(self._service.new_batch_http_request,
                              self._service.operations().cancel, running)
constant[Kills the operations associated with the specified job or job.task.
Args:
user_ids: List of user ids who "own" the job(s) to cancel.
job_ids: List of job_ids to cancel.
task_ids: List of task-ids to cancel.
labels: List of LabelParam, each must match the job(s) to be canceled.
create_time_min: a timezone-aware datetime value for the earliest create
time of a task, inclusive.
create_time_max: a timezone-aware datetime value for the most recent
create time of a task, inclusive.
Returns:
A list of tasks canceled and a list of error messages.
]
variable[tasks] assign[=] call[name[list], parameter[call[name[self].lookup_job_tasks, parameter[<ast.Set object at 0x7da1b0057dc0>]]]]
call[name[print], parameter[binary_operation[constant[Found %d tasks to delete.] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[tasks]]]]]]
return[call[name[google_base].cancel, parameter[name[self]._service.new_batch_http_request, call[name[self]._service.operations, parameter[]].cancel, name[tasks]]]] | keyword[def] identifier[delete_jobs] ( identifier[self] ,
identifier[user_ids] ,
identifier[job_ids] ,
identifier[task_ids] ,
identifier[labels] ,
identifier[create_time_min] = keyword[None] ,
identifier[create_time_max] = keyword[None] ):
literal[string]
identifier[tasks] = identifier[list] (
identifier[self] . identifier[lookup_job_tasks] (
{ literal[string] },
identifier[user_ids] = identifier[user_ids] ,
identifier[job_ids] = identifier[job_ids] ,
identifier[task_ids] = identifier[task_ids] ,
identifier[labels] = identifier[labels] ,
identifier[create_time_min] = identifier[create_time_min] ,
identifier[create_time_max] = identifier[create_time_max] ))
identifier[print] ( literal[string] % identifier[len] ( identifier[tasks] ))
keyword[return] identifier[google_base] . identifier[cancel] ( identifier[self] . identifier[_service] . identifier[new_batch_http_request] ,
identifier[self] . identifier[_service] . identifier[operations] (). identifier[cancel] , identifier[tasks] ) | def delete_jobs(self, user_ids, job_ids, task_ids, labels, create_time_min=None, create_time_max=None):
"""Kills the operations associated with the specified job or job.task.
Args:
user_ids: List of user ids who "own" the job(s) to cancel.
job_ids: List of job_ids to cancel.
task_ids: List of task-ids to cancel.
labels: List of LabelParam, each must match the job(s) to be canceled.
create_time_min: a timezone-aware datetime value for the earliest create
time of a task, inclusive.
create_time_max: a timezone-aware datetime value for the most recent
create time of a task, inclusive.
Returns:
A list of tasks canceled and a list of error messages.
"""
# Look up the job(s)
tasks = list(self.lookup_job_tasks({'RUNNING'}, user_ids=user_ids, job_ids=job_ids, task_ids=task_ids, labels=labels, create_time_min=create_time_min, create_time_max=create_time_max))
print('Found %d tasks to delete.' % len(tasks))
return google_base.cancel(self._service.new_batch_http_request, self._service.operations().cancel, tasks) |
def encode_hook(self, hook, msg):
    """
    Fill an RpbCommitHook protobuf message from a commit-hook dict.
    Used when encoding bucket properties.

    :param hook: the hook to encode
    :type hook: dict
    :param msg: the protobuf message to fill
    :type msg: riak.pb.riak_pb2.RpbCommitHook
    :rtype riak.pb.riak_pb2.RpbCommitHook
    """
    if 'name' not in hook:
        # Unnamed hooks are encoded as a module/function pair instead.
        self.encode_modfun(hook, msg.modfun)
    else:
        msg.name = str_to_bytes(hook['name'])
    return msg
constant[
Encodes a commit hook dict into the protobuf message. Used in
bucket properties.
:param hook: the hook to encode
:type hook: dict
:param msg: the protobuf message to fill
:type msg: riak.pb.riak_pb2.RpbCommitHook
:rtype riak.pb.riak_pb2.RpbCommitHook
]
if compare[constant[name] in name[hook]] begin[:]
name[msg].name assign[=] call[name[str_to_bytes], parameter[call[name[hook]][constant[name]]]]
return[name[msg]] | keyword[def] identifier[encode_hook] ( identifier[self] , identifier[hook] , identifier[msg] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[hook] :
identifier[msg] . identifier[name] = identifier[str_to_bytes] ( identifier[hook] [ literal[string] ])
keyword[else] :
identifier[self] . identifier[encode_modfun] ( identifier[hook] , identifier[msg] . identifier[modfun] )
keyword[return] identifier[msg] | def encode_hook(self, hook, msg):
"""
Encodes a commit hook dict into the protobuf message. Used in
bucket properties.
:param hook: the hook to encode
:type hook: dict
:param msg: the protobuf message to fill
:type msg: riak.pb.riak_pb2.RpbCommitHook
:rtype riak.pb.riak_pb2.RpbCommitHook
"""
if 'name' in hook:
msg.name = str_to_bytes(hook['name']) # depends on [control=['if'], data=['hook']]
else:
self.encode_modfun(hook, msg.modfun)
return msg |
def connection_made(self):
    """Protocols connection established handler
    """
    peer = self._neigh_conf
    LOG.info(
        'Connection to peer: %s established',
        peer.ip_address,
        extra={
            'resource_name': peer.name,
            'resource_id': peer.id,
        },
    )
constant[Protocols connection established handler
]
call[name[LOG].info, parameter[constant[Connection to peer: %s established], name[self]._neigh_conf.ip_address]] | keyword[def] identifier[connection_made] ( identifier[self] ):
literal[string]
identifier[LOG] . identifier[info] (
literal[string] ,
identifier[self] . identifier[_neigh_conf] . identifier[ip_address] ,
identifier[extra] ={
literal[string] : identifier[self] . identifier[_neigh_conf] . identifier[name] ,
literal[string] : identifier[self] . identifier[_neigh_conf] . identifier[id]
}
) | def connection_made(self):
"""Protocols connection established handler
"""
LOG.info('Connection to peer: %s established', self._neigh_conf.ip_address, extra={'resource_name': self._neigh_conf.name, 'resource_id': self._neigh_conf.id}) |
def _get_serv(ret=None):
    '''
    Return a redis server object, creating the module-level connection
    pool on first use.
    '''
    global REDIS_POOL
    # Options are resolved on every call (matching the original
    # behaviour), even when the cached pool is returned.
    opts = _get_options(ret)
    if REDIS_POOL:
        return REDIS_POOL
    if opts.get('cluster_mode'):
        REDIS_POOL = StrictRedisCluster(
            startup_nodes=opts.get('startup_nodes'),
            skip_full_coverage_check=opts.get('skip_full_coverage_check'),
            decode_responses=True)
    else:
        REDIS_POOL = redis.StrictRedis(
            host=opts.get('host'),
            port=opts.get('port'),
            unix_socket_path=opts.get('unix_socket_path', None),
            db=opts.get('db'),
            decode_responses=True,
            password=opts.get('password'))
    return REDIS_POOL
constant[
Return a redis server object
]
variable[_options] assign[=] call[name[_get_options], parameter[name[ret]]]
<ast.Global object at 0x7da1b210d300>
if name[REDIS_POOL] begin[:]
return[name[REDIS_POOL]]
return[name[REDIS_POOL]] | keyword[def] identifier[_get_serv] ( identifier[ret] = keyword[None] ):
literal[string]
identifier[_options] = identifier[_get_options] ( identifier[ret] )
keyword[global] identifier[REDIS_POOL]
keyword[if] identifier[REDIS_POOL] :
keyword[return] identifier[REDIS_POOL]
keyword[elif] identifier[_options] . identifier[get] ( literal[string] ):
identifier[REDIS_POOL] = identifier[StrictRedisCluster] ( identifier[startup_nodes] = identifier[_options] . identifier[get] ( literal[string] ),
identifier[skip_full_coverage_check] = identifier[_options] . identifier[get] ( literal[string] ),
identifier[decode_responses] = keyword[True] )
keyword[else] :
identifier[REDIS_POOL] = identifier[redis] . identifier[StrictRedis] ( identifier[host] = identifier[_options] . identifier[get] ( literal[string] ),
identifier[port] = identifier[_options] . identifier[get] ( literal[string] ),
identifier[unix_socket_path] = identifier[_options] . identifier[get] ( literal[string] , keyword[None] ),
identifier[db] = identifier[_options] . identifier[get] ( literal[string] ),
identifier[decode_responses] = keyword[True] ,
identifier[password] = identifier[_options] . identifier[get] ( literal[string] ))
keyword[return] identifier[REDIS_POOL] | def _get_serv(ret=None):
"""
Return a redis server object
"""
_options = _get_options(ret)
global REDIS_POOL
if REDIS_POOL:
return REDIS_POOL # depends on [control=['if'], data=[]]
elif _options.get('cluster_mode'):
REDIS_POOL = StrictRedisCluster(startup_nodes=_options.get('startup_nodes'), skip_full_coverage_check=_options.get('skip_full_coverage_check'), decode_responses=True) # depends on [control=['if'], data=[]]
else:
REDIS_POOL = redis.StrictRedis(host=_options.get('host'), port=_options.get('port'), unix_socket_path=_options.get('unix_socket_path', None), db=_options.get('db'), decode_responses=True, password=_options.get('password'))
return REDIS_POOL |
def fit(self, X, y=None, **kwargs):
    """
    Fit the visualizer to the data and draw the parallel coordinates.
    The fit method is the primary drawing input for the
    visualization since it has both the X and y data required for the
    viz and the transform method does not.
    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features
    y : ndarray or Series of length n
        An array or series of target or class values
    kwargs : dict
        Pass generic arguments to the drawing method
    Returns
    -------
    self : instance
        Returns the instance of the transformer/visualizer
    """
    # Convert from pandas data types to plain numpy arrays
    if is_dataframe(X):
        # Capture the column names for axis labels before reverting to np.ndarray
        if self.features_ is None:
            self.features_ = np.array(X.columns)
        X = X.values
    if is_series(y):
        y = y.values
    # Fall back to integer labels for the feature columns when neither the
    # user nor a DataFrame supplied feature names
    if self.features_ is None:
        self.features_ = np.arange(0, X.shape[1])
    # Ensure that all classes are represented in the color mapping (before sample)
    # NOTE: np.unique also specifies the (sorted) ordering of the classes
    if self.classes_ is None:
        self.classes_ = [str(label) for label in np.unique(y)]
    # Create the color mapping for each class
    # TODO: Allow both colormap, listed colors, and palette definition
    # TODO: Make this an independent function or property for override!
    color_values = resolve_colors(
        n_colors=len(self.classes_), colormap=self.colormap, colors=self.color
    )
    self._colors = dict(zip(self.classes_, color_values))
    # One x-axis tick position per feature
    self._increments = np.arange(len(self.features_))
    # Subsample instances -- done after the color mapping so every class
    # keeps its color even if the sample drops some of its instances
    X, y = self._subsample(X, y)
    # Normalize instances with the configured scaler, if any
    if self.normalize is not None:
        X = self.NORMALIZERS[self.normalize].fit_transform(X)
    # the super method calls draw and returns self
    return super(ParallelCoordinates, self).fit(X, y, **kwargs) | def function[fit, parameter[self, X, y]]:
constant[
The fit method is the primary drawing input for the
visualization since it has both the X and y data required for the
viz and the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
]
if call[name[is_dataframe], parameter[name[X]]] begin[:]
if compare[name[self].features_ is constant[None]] begin[:]
name[self].features_ assign[=] call[name[np].array, parameter[name[X].columns]]
variable[X] assign[=] name[X].values
if call[name[is_series], parameter[name[y]]] begin[:]
variable[y] assign[=] name[y].values
if compare[name[self].features_ is constant[None]] begin[:]
name[self].features_ assign[=] call[name[np].arange, parameter[constant[0], call[name[X].shape][constant[1]]]]
if compare[name[self].classes_ is constant[None]] begin[:]
name[self].classes_ assign[=] <ast.ListComp object at 0x7da204622c50>
variable[color_values] assign[=] call[name[resolve_colors], parameter[]]
name[self]._colors assign[=] call[name[dict], parameter[call[name[zip], parameter[name[self].classes_, name[color_values]]]]]
name[self]._increments assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[self].features_]]]]
<ast.Tuple object at 0x7da2046232b0> assign[=] call[name[self]._subsample, parameter[name[X], name[y]]]
if compare[name[self].normalize is_not constant[None]] begin[:]
variable[X] assign[=] call[call[name[self].NORMALIZERS][name[self].normalize].fit_transform, parameter[name[X]]]
return[call[call[name[super], parameter[name[ParallelCoordinates], name[self]]].fit, parameter[name[X], name[y]]]] | keyword[def] identifier[fit] ( identifier[self] , identifier[X] , identifier[y] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[is_dataframe] ( identifier[X] ):
keyword[if] identifier[self] . identifier[features_] keyword[is] keyword[None] :
identifier[self] . identifier[features_] = identifier[np] . identifier[array] ( identifier[X] . identifier[columns] )
identifier[X] = identifier[X] . identifier[values]
keyword[if] identifier[is_series] ( identifier[y] ):
identifier[y] = identifier[y] . identifier[values]
keyword[if] identifier[self] . identifier[features_] keyword[is] keyword[None] :
identifier[self] . identifier[features_] = identifier[np] . identifier[arange] ( literal[int] , identifier[X] . identifier[shape] [ literal[int] ])
keyword[if] identifier[self] . identifier[classes_] keyword[is] keyword[None] :
identifier[self] . identifier[classes_] =[ identifier[str] ( identifier[label] ) keyword[for] identifier[label] keyword[in] identifier[np] . identifier[unique] ( identifier[y] )]
identifier[color_values] = identifier[resolve_colors] (
identifier[n_colors] = identifier[len] ( identifier[self] . identifier[classes_] ), identifier[colormap] = identifier[self] . identifier[colormap] , identifier[colors] = identifier[self] . identifier[color]
)
identifier[self] . identifier[_colors] = identifier[dict] ( identifier[zip] ( identifier[self] . identifier[classes_] , identifier[color_values] ))
identifier[self] . identifier[_increments] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[self] . identifier[features_] ))
identifier[X] , identifier[y] = identifier[self] . identifier[_subsample] ( identifier[X] , identifier[y] )
keyword[if] identifier[self] . identifier[normalize] keyword[is] keyword[not] keyword[None] :
identifier[X] = identifier[self] . identifier[NORMALIZERS] [ identifier[self] . identifier[normalize] ]. identifier[fit_transform] ( identifier[X] )
keyword[return] identifier[super] ( identifier[ParallelCoordinates] , identifier[self] ). identifier[fit] ( identifier[X] , identifier[y] ,** identifier[kwargs] ) | def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the
visualization since it has both the X and y data required for the
viz and the transform method does not.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
# Convert from pandas data types
if is_dataframe(X):
# Get column names before reverting to an np.ndarray
if self.features_ is None:
self.features_ = np.array(X.columns) # depends on [control=['if'], data=[]]
X = X.values # depends on [control=['if'], data=[]]
if is_series(y):
y = y.values # depends on [control=['if'], data=[]]
# Assign integer labels to the feature columns from the input
if self.features_ is None:
self.features_ = np.arange(0, X.shape[1]) # depends on [control=['if'], data=[]]
# Ensure that all classes are represented in the color mapping (before sample)
# NOTE: np.unique also specifies the ordering of the classes
if self.classes_ is None:
self.classes_ = [str(label) for label in np.unique(y)] # depends on [control=['if'], data=[]]
# Create the color mapping for each class
# TODO: Allow both colormap, listed colors, and palette definition
# TODO: Make this an independent function or property for override!
color_values = resolve_colors(n_colors=len(self.classes_), colormap=self.colormap, colors=self.color)
self._colors = dict(zip(self.classes_, color_values))
# Ticks for each feature specified
self._increments = np.arange(len(self.features_))
# Subsample instances
(X, y) = self._subsample(X, y)
# Normalize instances
if self.normalize is not None:
X = self.NORMALIZERS[self.normalize].fit_transform(X) # depends on [control=['if'], data=[]]
# the super method calls draw and returns self
return super(ParallelCoordinates, self).fit(X, y, **kwargs) |
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (isinstance(param_obj, collections.Callable)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out | def function[_str_member_list, parameter[self, name]]:
constant[
Generate a member listing, autosummary:: table where possible,
and a table where not.
]
variable[out] assign[=] list[[]]
if call[name[self]][name[name]] begin[:]
<ast.AugAssign object at 0x7da18ede5750>
variable[prefix] assign[=] call[name[getattr], parameter[name[self], constant[_name], constant[]]]
if name[prefix] begin[:]
variable[prefix] assign[=] binary_operation[constant[~%s.] <ast.Mod object at 0x7da2590d6920> name[prefix]]
variable[autosum] assign[=] list[[]]
variable[others] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18ede5a80>, <ast.Name object at 0x7da18ede6f20>, <ast.Name object at 0x7da18ede7b20>]]] in starred[call[name[self]][name[name]]] begin[:]
variable[param] assign[=] call[name[param].strip, parameter[]]
variable[param_obj] assign[=] call[name[getattr], parameter[name[self]._obj, name[param], constant[None]]]
if <ast.UnaryOp object at 0x7da1b1109690> begin[:]
variable[param_obj] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b1108430> begin[:]
<ast.AugAssign object at 0x7da1b1109300>
if name[autosum] begin[:]
<ast.AugAssign object at 0x7da1b1109420>
if name[self].class_members_toctree begin[:]
<ast.AugAssign object at 0x7da1b110a200>
<ast.AugAssign object at 0x7da1b1108550>
if name[others] begin[:]
variable[maxlen_0] assign[=] call[name[max], parameter[constant[3], call[name[max], parameter[<ast.ListComp object at 0x7da1b1108730>]]]]
variable[hdr] assign[=] binary_operation[binary_operation[binary_operation[call[name[sixu], parameter[constant[=]]] * name[maxlen_0]] + call[name[sixu], parameter[constant[ ]]]] + binary_operation[call[name[sixu], parameter[constant[=]]] * constant[10]]]
variable[fmt] assign[=] binary_operation[call[name[sixu], parameter[constant[%%%ds %%s ]]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b110ab30>]]]
<ast.AugAssign object at 0x7da1b110a920>
for taget[tuple[[<ast.Name object at 0x7da1b1109f60>, <ast.Name object at 0x7da1b1108eb0>, <ast.Name object at 0x7da1b1109f30>]]] in starred[name[others]] begin[:]
variable[desc] assign[=] call[call[call[name[sixu], parameter[constant[ ]]].join, parameter[<ast.GeneratorExp object at 0x7da1b1108b50>]].strip, parameter[]]
if name[param_type] begin[:]
variable[desc] assign[=] binary_operation[constant[(%s) %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b110ad40>, <ast.Name object at 0x7da1b11088b0>]]]
<ast.AugAssign object at 0x7da1b1108250>
<ast.AugAssign object at 0x7da1b110ab00>
<ast.AugAssign object at 0x7da1b11096f0>
return[name[out]] | keyword[def] identifier[_str_member_list] ( identifier[self] , identifier[name] ):
literal[string]
identifier[out] =[]
keyword[if] identifier[self] [ identifier[name] ]:
identifier[out] +=[ literal[string] % identifier[name] , literal[string] ]
identifier[prefix] = identifier[getattr] ( identifier[self] , literal[string] , literal[string] )
keyword[if] identifier[prefix] :
identifier[prefix] = literal[string] % identifier[prefix]
identifier[autosum] =[]
identifier[others] =[]
keyword[for] identifier[param] , identifier[param_type] , identifier[desc] keyword[in] identifier[self] [ identifier[name] ]:
identifier[param] = identifier[param] . identifier[strip] ()
identifier[param_obj] = identifier[getattr] ( identifier[self] . identifier[_obj] , identifier[param] , keyword[None] )
keyword[if] keyword[not] ( identifier[isinstance] ( identifier[param_obj] , identifier[collections] . identifier[Callable] )
keyword[or] identifier[isinstance] ( identifier[param_obj] , identifier[property] )
keyword[or] identifier[inspect] . identifier[isgetsetdescriptor] ( identifier[param_obj] )):
identifier[param_obj] = keyword[None]
keyword[if] identifier[param_obj] keyword[and] ( identifier[pydoc] . identifier[getdoc] ( identifier[param_obj] ) keyword[or] keyword[not] identifier[desc] ):
identifier[autosum] +=[ literal[string] %( identifier[prefix] , identifier[param] )]
keyword[else] :
identifier[others] . identifier[append] (( identifier[param] , identifier[param_type] , identifier[desc] ))
keyword[if] identifier[autosum] :
identifier[out] +=[ literal[string] ]
keyword[if] identifier[self] . identifier[class_members_toctree] :
identifier[out] +=[ literal[string] ]
identifier[out] +=[ literal[string] ]+ identifier[autosum]
keyword[if] identifier[others] :
identifier[maxlen_0] = identifier[max] ( literal[int] , identifier[max] ([ identifier[len] ( identifier[x] [ literal[int] ]) keyword[for] identifier[x] keyword[in] identifier[others] ]))
identifier[hdr] = identifier[sixu] ( literal[string] )* identifier[maxlen_0] + identifier[sixu] ( literal[string] )+ identifier[sixu] ( literal[string] )* literal[int]
identifier[fmt] = identifier[sixu] ( literal[string] )%( identifier[maxlen_0] ,)
identifier[out] +=[ literal[string] , identifier[hdr] ]
keyword[for] identifier[param] , identifier[param_type] , identifier[desc] keyword[in] identifier[others] :
identifier[desc] = identifier[sixu] ( literal[string] ). identifier[join] ( identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[desc] ). identifier[strip] ()
keyword[if] identifier[param_type] :
identifier[desc] = literal[string] %( identifier[param_type] , identifier[desc] )
identifier[out] +=[ identifier[fmt] %( identifier[param] . identifier[strip] (), identifier[desc] )]
identifier[out] +=[ identifier[hdr] ]
identifier[out] +=[ literal[string] ]
keyword[return] identifier[out] | def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix # depends on [control=['if'], data=[]]
autosum = []
others = []
for (param, param_type, desc) in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (isinstance(param_obj, collections.Callable) or isinstance(param_obj, property) or inspect.isgetsetdescriptor(param_obj)):
param_obj = None # depends on [control=['if'], data=[]]
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [' %s%s' % (prefix, param)] # depends on [control=['if'], data=[]]
else:
others.append((param, param_type, desc)) # depends on [control=['for'], data=[]]
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:'] # depends on [control=['if'], data=[]]
out += [''] + autosum # depends on [control=['if'], data=[]]
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu('=') * maxlen_0 + sixu(' ') + sixu('=') * 10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for (param, param_type, desc) in others:
desc = sixu(' ').join((x.strip() for x in desc)).strip()
if param_type:
desc = '(%s) %s' % (param_type, desc) # depends on [control=['if'], data=[]]
out += [fmt % (param.strip(), desc)] # depends on [control=['for'], data=[]]
out += [hdr] # depends on [control=['if'], data=[]]
out += [''] # depends on [control=['if'], data=[]]
return out |
def update(self):
    """
    Do the pulling if necessary.

    Coroutine (``yield from`` style): refreshes remote refs, shields
    local untracked/modified files from being clobbered, then merges the
    upstream branch into the local checkout, preferring the local side
    of any conflict (``-Xours``).
    """
    # Fetch remotes, so we know we're dealing with latest remote
    yield from self.update_remotes()
    # Rename local untracked files that might be overwritten by pull
    yield from self.rename_local_untracked()
    # Reset local files that have been deleted. We don't actually expect users to
    # delete something that's present upstream and expect to keep it. This prevents
    # unnecessary conflicts, and also allows users to click the link again to get
    # a fresh copy of a file they might have screwed up.
    yield from self.reset_deleted_files()
    # If there are local changes, make a commit so we can do merges when pulling
    # We also allow empty commits. On NFS (at least), sometimes repo_is_dirty returns a false
    # positive, returning True even when there are no local changes (git diff-files seems to return
    # bogus output?). While ideally that would not happen, allowing empty commits keeps us
    # resilient to that issue.
    if self.repo_is_dirty():
        # Hold the repo lock while mutating the working tree
        yield from self.ensure_lock()
        yield from execute_cmd(['git', 'commit', '-am', 'WIP', '--allow-empty'], cwd=self.repo_dir)
    # Merge master into local!
    yield from self.ensure_lock()
    yield from execute_cmd(['git', 'merge', '-Xours', 'origin/{}'.format(self.branch_name)], cwd=self.repo_dir) | def function[update, parameter[self]]:
constant[
Do the pulling if necessary
]
<ast.YieldFrom object at 0x7da1b119fc70>
<ast.YieldFrom object at 0x7da18f723820>
<ast.YieldFrom object at 0x7da18f722380>
if call[name[self].repo_is_dirty, parameter[]] begin[:]
<ast.YieldFrom object at 0x7da1b1153040>
<ast.YieldFrom object at 0x7da1b1150df0>
<ast.YieldFrom object at 0x7da1b11523e0>
<ast.YieldFrom object at 0x7da1b11518a0> | keyword[def] identifier[update] ( identifier[self] ):
literal[string]
keyword[yield] keyword[from] identifier[self] . identifier[update_remotes] ()
keyword[yield] keyword[from] identifier[self] . identifier[rename_local_untracked] ()
keyword[yield] keyword[from] identifier[self] . identifier[reset_deleted_files] ()
keyword[if] identifier[self] . identifier[repo_is_dirty] ():
keyword[yield] keyword[from] identifier[self] . identifier[ensure_lock] ()
keyword[yield] keyword[from] identifier[execute_cmd] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ], identifier[cwd] = identifier[self] . identifier[repo_dir] )
keyword[yield] keyword[from] identifier[self] . identifier[ensure_lock] ()
keyword[yield] keyword[from] identifier[execute_cmd] ([ literal[string] , literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[self] . identifier[branch_name] )], identifier[cwd] = identifier[self] . identifier[repo_dir] ) | def update(self):
"""
Do the pulling if necessary
"""
# Fetch remotes, so we know we're dealing with latest remote
yield from self.update_remotes()
# Rename local untracked files that might be overwritten by pull
yield from self.rename_local_untracked()
# Reset local files that have been deleted. We don't actually expect users to
# delete something that's present upstream and expect to keep it. This prevents
# unnecessary conflicts, and also allows users to click the link again to get
# a fresh copy of a file they might have screwed up.
yield from self.reset_deleted_files()
# If there are local changes, make a commit so we can do merges when pulling
# We also allow empty commits. On NFS (at least), sometimes repo_is_dirty returns a false
# positive, returning True even when there are no local changes (git diff-files seems to return
# bogus output?). While ideally that would not happen, allowing empty commits keeps us
# resilient to that issue.
if self.repo_is_dirty():
yield from self.ensure_lock()
yield from execute_cmd(['git', 'commit', '-am', 'WIP', '--allow-empty'], cwd=self.repo_dir) # depends on [control=['if'], data=[]]
# Merge master into local!
yield from self.ensure_lock()
yield from execute_cmd(['git', 'merge', '-Xours', 'origin/{}'.format(self.branch_name)], cwd=self.repo_dir) |
def full_restapi_key_transformer(key, attr_desc, value):
"""A key transformer that returns the full RestAPI key path.
:param str _: The attribute name
:param dict attr_desc: The attribute metadata
:param object value: The value
:returns: A list of keys using RestAPI syntax.
"""
keys = _FLATTEN.split(attr_desc['key'])
return ([_decode_attribute_map_key(k) for k in keys], value) | def function[full_restapi_key_transformer, parameter[key, attr_desc, value]]:
constant[A key transformer that returns the full RestAPI key path.
:param str _: The attribute name
:param dict attr_desc: The attribute metadata
:param object value: The value
:returns: A list of keys using RestAPI syntax.
]
variable[keys] assign[=] call[name[_FLATTEN].split, parameter[call[name[attr_desc]][constant[key]]]]
return[tuple[[<ast.ListComp object at 0x7da20c7ca230>, <ast.Name object at 0x7da20c7c85e0>]]] | keyword[def] identifier[full_restapi_key_transformer] ( identifier[key] , identifier[attr_desc] , identifier[value] ):
literal[string]
identifier[keys] = identifier[_FLATTEN] . identifier[split] ( identifier[attr_desc] [ literal[string] ])
keyword[return] ([ identifier[_decode_attribute_map_key] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[keys] ], identifier[value] ) | def full_restapi_key_transformer(key, attr_desc, value):
"""A key transformer that returns the full RestAPI key path.
:param str _: The attribute name
:param dict attr_desc: The attribute metadata
:param object value: The value
:returns: A list of keys using RestAPI syntax.
"""
keys = _FLATTEN.split(attr_desc['key'])
return ([_decode_attribute_map_key(k) for k in keys], value) |
def find_kw(ar_or_sample, kw):
""" This function is used to find keywords that are not on the analysis
but keywords that are on the interim fields.
This function and is is_keyword function should probably be in
resultsimport.py or somewhere central where it can be used by other
instrument interfaces.
"""
for analysis in find_analyses(ar_or_sample):
if kw in get_interims_keywords(analysis):
return analysis.getKeyword()
return None | def function[find_kw, parameter[ar_or_sample, kw]]:
constant[ This function is used to find keywords that are not on the analysis
but keywords that are on the interim fields.
This function and is is_keyword function should probably be in
resultsimport.py or somewhere central where it can be used by other
instrument interfaces.
]
for taget[name[analysis]] in starred[call[name[find_analyses], parameter[name[ar_or_sample]]]] begin[:]
if compare[name[kw] in call[name[get_interims_keywords], parameter[name[analysis]]]] begin[:]
return[call[name[analysis].getKeyword, parameter[]]]
return[constant[None]] | keyword[def] identifier[find_kw] ( identifier[ar_or_sample] , identifier[kw] ):
literal[string]
keyword[for] identifier[analysis] keyword[in] identifier[find_analyses] ( identifier[ar_or_sample] ):
keyword[if] identifier[kw] keyword[in] identifier[get_interims_keywords] ( identifier[analysis] ):
keyword[return] identifier[analysis] . identifier[getKeyword] ()
keyword[return] keyword[None] | def find_kw(ar_or_sample, kw):
""" This function is used to find keywords that are not on the analysis
but keywords that are on the interim fields.
This function and is is_keyword function should probably be in
resultsimport.py or somewhere central where it can be used by other
instrument interfaces.
"""
for analysis in find_analyses(ar_or_sample):
if kw in get_interims_keywords(analysis):
return analysis.getKeyword() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['analysis']]
return None |
def getmacbyip(ip, chainCC=0):
    """Return MAC address corresponding to a given IP address.

    Resolution order: multicast addresses are mapped arithmetically,
    loopback/broadcast destinations get the broadcast MAC, then the ARP
    cache is consulted, and finally a who-has ARP request is sent on the
    resolved interface.

    :param ip: IPv4 address string (or a Net; its first address is used)
    :param chainCC: forwarded to srp1
    :returns: the MAC address string, or None if resolution failed
    """
    if isinstance(ip, Net):
        ip = next(iter(ip))
    # Canonicalize the dotted-quad form; None/empty falls back to 0.0.0.0
    ip = inet_ntoa(inet_aton(ip or "0.0.0.0"))
    tmp = [orb(e) for e in inet_aton(ip)]
    # IPv4 multicast (high nibble 0xe = 224.0.0.0/4) maps to a derived
    # 01:00:5e MAC built from the low 23 bits of the address
    if (tmp[0] & 0xf0) == 0xe0: # mcast @
        return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1] & 0x7f, tmp[2], tmp[3])
    iff, _, gw = conf.route.route(ip)
    # Loopback and broadcast destinations use the Ethernet broadcast MAC
    if ((iff == consts.LOOPBACK_INTERFACE) or (ip == conf.route.get_if_bcast(iff))): # noqa: E501
        return "ff:ff:ff:ff:ff:ff"
    # For routed destinations, resolve the gateway's MAC instead
    if gw != "0.0.0.0":
        ip = gw
    # Serve from the ARP cache when a previous resolution is available
    mac = conf.netcache.arp_cache.get(ip)
    if mac:
        return mac
    try:
        # Broadcast a who-has ARP request and wait up to 2s for a reply
        res = srp1(Ether(dst=ETHER_BROADCAST) / ARP(op="who-has", pdst=ip),
                   type=ETH_P_ARP,
                   iface=iff,
                   timeout=2,
                   verbose=0,
                   chainCC=chainCC,
                   nofilter=1)
    except Exception:
        # Best effort: any send/receive failure resolves to None
        return None
    if res is not None:
        # Cache the learned mapping for subsequent lookups
        mac = res.payload.hwsrc
        conf.netcache.arp_cache[ip] = mac
        return mac
    return None | def function[getmacbyip, parameter[ip, chainCC]]:
return None | def function[getmacbyip, parameter[ip, chainCC]]:
constant[Return MAC address corresponding to a given IP address]
if call[name[isinstance], parameter[name[ip], name[Net]]] begin[:]
variable[ip] assign[=] call[name[next], parameter[call[name[iter], parameter[name[ip]]]]]
variable[ip] assign[=] call[name[inet_ntoa], parameter[call[name[inet_aton], parameter[<ast.BoolOp object at 0x7da2044c07f0>]]]]
variable[tmp] assign[=] <ast.ListComp object at 0x7da2044c0730>
if compare[binary_operation[call[name[tmp]][constant[0]] <ast.BitAnd object at 0x7da2590d6b60> constant[240]] equal[==] constant[224]] begin[:]
return[binary_operation[constant[01:00:5e:%.2x:%.2x:%.2x] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da1b1f27640>, <ast.Subscript object at 0x7da1b1f27e20>, <ast.Subscript object at 0x7da1b1f26920>]]]]
<ast.Tuple object at 0x7da1b1f26ef0> assign[=] call[name[conf].route.route, parameter[name[ip]]]
if <ast.BoolOp object at 0x7da1b1f250c0> begin[:]
return[constant[ff:ff:ff:ff:ff:ff]]
if compare[name[gw] not_equal[!=] constant[0.0.0.0]] begin[:]
variable[ip] assign[=] name[gw]
variable[mac] assign[=] call[name[conf].netcache.arp_cache.get, parameter[name[ip]]]
if name[mac] begin[:]
return[name[mac]]
<ast.Try object at 0x7da20eb2a110>
if compare[name[res] is_not constant[None]] begin[:]
variable[mac] assign[=] name[res].payload.hwsrc
call[name[conf].netcache.arp_cache][name[ip]] assign[=] name[mac]
return[name[mac]]
return[constant[None]] | keyword[def] identifier[getmacbyip] ( identifier[ip] , identifier[chainCC] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[ip] , identifier[Net] ):
identifier[ip] = identifier[next] ( identifier[iter] ( identifier[ip] ))
identifier[ip] = identifier[inet_ntoa] ( identifier[inet_aton] ( identifier[ip] keyword[or] literal[string] ))
identifier[tmp] =[ identifier[orb] ( identifier[e] ) keyword[for] identifier[e] keyword[in] identifier[inet_aton] ( identifier[ip] )]
keyword[if] ( identifier[tmp] [ literal[int] ]& literal[int] )== literal[int] :
keyword[return] literal[string] %( identifier[tmp] [ literal[int] ]& literal[int] , identifier[tmp] [ literal[int] ], identifier[tmp] [ literal[int] ])
identifier[iff] , identifier[_] , identifier[gw] = identifier[conf] . identifier[route] . identifier[route] ( identifier[ip] )
keyword[if] (( identifier[iff] == identifier[consts] . identifier[LOOPBACK_INTERFACE] ) keyword[or] ( identifier[ip] == identifier[conf] . identifier[route] . identifier[get_if_bcast] ( identifier[iff] ))):
keyword[return] literal[string]
keyword[if] identifier[gw] != literal[string] :
identifier[ip] = identifier[gw]
identifier[mac] = identifier[conf] . identifier[netcache] . identifier[arp_cache] . identifier[get] ( identifier[ip] )
keyword[if] identifier[mac] :
keyword[return] identifier[mac]
keyword[try] :
identifier[res] = identifier[srp1] ( identifier[Ether] ( identifier[dst] = identifier[ETHER_BROADCAST] )/ identifier[ARP] ( identifier[op] = literal[string] , identifier[pdst] = identifier[ip] ),
identifier[type] = identifier[ETH_P_ARP] ,
identifier[iface] = identifier[iff] ,
identifier[timeout] = literal[int] ,
identifier[verbose] = literal[int] ,
identifier[chainCC] = identifier[chainCC] ,
identifier[nofilter] = literal[int] )
keyword[except] identifier[Exception] :
keyword[return] keyword[None]
keyword[if] identifier[res] keyword[is] keyword[not] keyword[None] :
identifier[mac] = identifier[res] . identifier[payload] . identifier[hwsrc]
identifier[conf] . identifier[netcache] . identifier[arp_cache] [ identifier[ip] ]= identifier[mac]
keyword[return] identifier[mac]
keyword[return] keyword[None] | def getmacbyip(ip, chainCC=0):
"""Return MAC address corresponding to a given IP address"""
if isinstance(ip, Net):
ip = next(iter(ip)) # depends on [control=['if'], data=[]]
ip = inet_ntoa(inet_aton(ip or '0.0.0.0'))
tmp = [orb(e) for e in inet_aton(ip)]
if tmp[0] & 240 == 224: # mcast @
return '01:00:5e:%.2x:%.2x:%.2x' % (tmp[1] & 127, tmp[2], tmp[3]) # depends on [control=['if'], data=[]]
(iff, _, gw) = conf.route.route(ip)
if iff == consts.LOOPBACK_INTERFACE or ip == conf.route.get_if_bcast(iff): # noqa: E501
return 'ff:ff:ff:ff:ff:ff' # depends on [control=['if'], data=[]]
if gw != '0.0.0.0':
ip = gw # depends on [control=['if'], data=['gw']]
mac = conf.netcache.arp_cache.get(ip)
if mac:
return mac # depends on [control=['if'], data=[]]
try:
res = srp1(Ether(dst=ETHER_BROADCAST) / ARP(op='who-has', pdst=ip), type=ETH_P_ARP, iface=iff, timeout=2, verbose=0, chainCC=chainCC, nofilter=1) # depends on [control=['try'], data=[]]
except Exception:
return None # depends on [control=['except'], data=[]]
if res is not None:
mac = res.payload.hwsrc
conf.netcache.arp_cache[ip] = mac
return mac # depends on [control=['if'], data=['res']]
return None |
def _create_executor(self, handler, args, cpus_per_worker=1):
"""Return a new :class:`.Executor` instance."""
if self._args.parallel > 0:
workers = self._args.parallel
else:
try:
workers = mp.cpu_count() // cpus_per_worker
except NotImplementedError:
workers = 1
if workers != 1:
logger.info('Using {} parallel worker processes...'.format(
workers))
executor = ProcessPoolExecutor(
processes=workers, handler_init=handler, handler_args=args)
else:
logger.info('Using single worker...')
executor = SequentialExecutor(
handler_init=handler, handler_args=args)
return executor | def function[_create_executor, parameter[self, handler, args, cpus_per_worker]]:
constant[Return a new :class:`.Executor` instance.]
if compare[name[self]._args.parallel greater[>] constant[0]] begin[:]
variable[workers] assign[=] name[self]._args.parallel
if compare[name[workers] not_equal[!=] constant[1]] begin[:]
call[name[logger].info, parameter[call[constant[Using {} parallel worker processes...].format, parameter[name[workers]]]]]
variable[executor] assign[=] call[name[ProcessPoolExecutor], parameter[]]
return[name[executor]] | keyword[def] identifier[_create_executor] ( identifier[self] , identifier[handler] , identifier[args] , identifier[cpus_per_worker] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[_args] . identifier[parallel] > literal[int] :
identifier[workers] = identifier[self] . identifier[_args] . identifier[parallel]
keyword[else] :
keyword[try] :
identifier[workers] = identifier[mp] . identifier[cpu_count] ()// identifier[cpus_per_worker]
keyword[except] identifier[NotImplementedError] :
identifier[workers] = literal[int]
keyword[if] identifier[workers] != literal[int] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] (
identifier[workers] ))
identifier[executor] = identifier[ProcessPoolExecutor] (
identifier[processes] = identifier[workers] , identifier[handler_init] = identifier[handler] , identifier[handler_args] = identifier[args] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[executor] = identifier[SequentialExecutor] (
identifier[handler_init] = identifier[handler] , identifier[handler_args] = identifier[args] )
keyword[return] identifier[executor] | def _create_executor(self, handler, args, cpus_per_worker=1):
"""Return a new :class:`.Executor` instance."""
if self._args.parallel > 0:
workers = self._args.parallel # depends on [control=['if'], data=[]]
else:
try:
workers = mp.cpu_count() // cpus_per_worker # depends on [control=['try'], data=[]]
except NotImplementedError:
workers = 1 # depends on [control=['except'], data=[]]
if workers != 1:
logger.info('Using {} parallel worker processes...'.format(workers))
executor = ProcessPoolExecutor(processes=workers, handler_init=handler, handler_args=args) # depends on [control=['if'], data=['workers']]
else:
logger.info('Using single worker...')
executor = SequentialExecutor(handler_init=handler, handler_args=args)
return executor |
def render_children(self, context, fragment, can_reorder=True, can_add=False):
    """
    Render this block's children into *fragment* with Studio-appropriate HTML.

    Each child is rendered with its ``author_view`` when it defines one,
    falling back to ``student_view`` otherwise.  When *can_reorder* is True
    the children's usage ids are collected into the child context's
    ``reorderable_items`` set so the template can enable drag-and-drop.
    """
    # Build the context passed down to each child; start with the
    # reorderable-items accumulator, then layer the caller's context on top.
    child_context = {'reorderable_items': set()}
    if context:
        child_context.update(context)

    rendered_children = []
    for usage_key in self.children:
        block = self.runtime.get_block(usage_key)
        if can_reorder:
            child_context['reorderable_items'].add(block.scope_ids.usage_id)
        # Prefer the Studio-specific author_view when the child provides one.
        if hasattr(block, 'author_view'):
            view_name = 'author_view'
        else:
            view_name = 'student_view'
        child_fragment = block.render(view_name, child_context)
        # Hoist the child's JS/CSS resources into the parent fragment.
        fragment.add_frag_resources(child_fragment)
        rendered_children.append({
            'id': text_type(block.scope_ids.usage_id),
            'content': child_fragment.content,
        })

    template_context = {
        'items': rendered_children,
        'xblock_context': context,
        'can_add': can_add,
        'can_reorder': can_reorder,
    }
    fragment.add_content(self.runtime.render_template(
        "studio_render_children_view.html", template_context))
constant[
Renders the children of the module with HTML appropriate for Studio. If can_reorder is
True, then the children will be rendered to support drag and drop.
]
variable[contents] assign[=] list[[]]
variable[child_context] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e73820>], [<ast.Call object at 0x7da1b0e73670>]]
if name[context] begin[:]
call[name[child_context].update, parameter[name[context]]]
for taget[name[child_id]] in starred[name[self].children] begin[:]
variable[child] assign[=] call[name[self].runtime.get_block, parameter[name[child_id]]]
if name[can_reorder] begin[:]
call[call[name[child_context]][constant[reorderable_items]].add, parameter[name[child].scope_ids.usage_id]]
variable[view_to_render] assign[=] <ast.IfExp object at 0x7da1b0e73700>
variable[rendered_child] assign[=] call[name[child].render, parameter[name[view_to_render], name[child_context]]]
call[name[fragment].add_frag_resources, parameter[name[rendered_child]]]
call[name[contents].append, parameter[dictionary[[<ast.Constant object at 0x7da1b0e72c50>, <ast.Constant object at 0x7da1b0e735b0>], [<ast.Call object at 0x7da1b0e72b60>, <ast.Attribute object at 0x7da1b0e73d60>]]]]
call[name[fragment].add_content, parameter[call[name[self].runtime.render_template, parameter[constant[studio_render_children_view.html], dictionary[[<ast.Constant object at 0x7da1b0f61180>, <ast.Constant object at 0x7da1b0f3a6e0>, <ast.Constant object at 0x7da1b0f3bc70>, <ast.Constant object at 0x7da1b0f39bd0>], [<ast.Name object at 0x7da1b0f389d0>, <ast.Name object at 0x7da1b0f3b910>, <ast.Name object at 0x7da1b0f39450>, <ast.Name object at 0x7da1b0f385b0>]]]]]] | keyword[def] identifier[render_children] ( identifier[self] , identifier[context] , identifier[fragment] , identifier[can_reorder] = keyword[True] , identifier[can_add] = keyword[False] ):
literal[string]
identifier[contents] =[]
identifier[child_context] ={ literal[string] : identifier[set] ()}
keyword[if] identifier[context] :
identifier[child_context] . identifier[update] ( identifier[context] )
keyword[for] identifier[child_id] keyword[in] identifier[self] . identifier[children] :
identifier[child] = identifier[self] . identifier[runtime] . identifier[get_block] ( identifier[child_id] )
keyword[if] identifier[can_reorder] :
identifier[child_context] [ literal[string] ]. identifier[add] ( identifier[child] . identifier[scope_ids] . identifier[usage_id] )
identifier[view_to_render] = literal[string] keyword[if] identifier[hasattr] ( identifier[child] , literal[string] ) keyword[else] literal[string]
identifier[rendered_child] = identifier[child] . identifier[render] ( identifier[view_to_render] , identifier[child_context] )
identifier[fragment] . identifier[add_frag_resources] ( identifier[rendered_child] )
identifier[contents] . identifier[append] ({
literal[string] : identifier[text_type] ( identifier[child] . identifier[scope_ids] . identifier[usage_id] ),
literal[string] : identifier[rendered_child] . identifier[content]
})
identifier[fragment] . identifier[add_content] ( identifier[self] . identifier[runtime] . identifier[render_template] ( literal[string] ,{
literal[string] : identifier[contents] ,
literal[string] : identifier[context] ,
literal[string] : identifier[can_add] ,
literal[string] : identifier[can_reorder] ,
})) | def render_children(self, context, fragment, can_reorder=True, can_add=False):
"""
Renders the children of the module with HTML appropriate for Studio. If can_reorder is
True, then the children will be rendered to support drag and drop.
"""
contents = []
child_context = {'reorderable_items': set()}
if context:
child_context.update(context) # depends on [control=['if'], data=[]]
for child_id in self.children:
child = self.runtime.get_block(child_id)
if can_reorder:
child_context['reorderable_items'].add(child.scope_ids.usage_id) # depends on [control=['if'], data=[]]
view_to_render = 'author_view' if hasattr(child, 'author_view') else 'student_view'
rendered_child = child.render(view_to_render, child_context)
fragment.add_frag_resources(rendered_child)
contents.append({'id': text_type(child.scope_ids.usage_id), 'content': rendered_child.content}) # depends on [control=['for'], data=['child_id']]
fragment.add_content(self.runtime.render_template('studio_render_children_view.html', {'items': contents, 'xblock_context': context, 'can_add': can_add, 'can_reorder': can_reorder})) |
def split_fixed_pattern(path):
    """
    Split *path* into its fixed prefix and its wildcard-masked remainder.

    The split point is the last ``/`` preceding the first ``*``, so the fixed
    part always ends with that separator and the pattern part starts right
    after it.

    :param path: path possibly containing ``*`` wildcards, e.g.
        https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
    :return: tuple ``(_path_fixed, _path_pattern)``, e.g.
        _path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
        _path_pattern: *.*.*/vc110/x86/win/boost.*.*.*.tar.gz
        If *path* contains no ``*`` the whole path is fixed and the pattern
        part is an empty string.
    """
    _first_pattern_pos = path.find('*')
    if _first_pattern_pos < 0:
        # No wildcard at all: nothing is masked.  Without this guard,
        # find() returning -1 would make the rfind() below scan up to the
        # second-to-last character and split the path at an arbitrary slash.
        return path, ''
    # +1 keeps the trailing '/' on the fixed part.
    _path_separator_pos = path.rfind('/', 0, _first_pattern_pos) + 1
    _path_fixed = path[:_path_separator_pos]
    _path_pattern = path[_path_separator_pos:]
    return _path_fixed, _path_pattern
constant[
Split path into fixed and masked parts
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc110/x86/win/boost.*.*.*.tar.gz
]
variable[_first_pattern_pos] assign[=] call[name[path].find, parameter[constant[*]]]
variable[_path_separator_pos] assign[=] binary_operation[call[name[path].rfind, parameter[constant[/], constant[0], name[_first_pattern_pos]]] + constant[1]]
variable[_path_fixed] assign[=] call[name[path]][<ast.Slice object at 0x7da1b26afb50>]
variable[_path_pattern] assign[=] call[name[path]][<ast.Slice object at 0x7da1b26acbb0>]
return[tuple[[<ast.Name object at 0x7da2044c2b00>, <ast.Name object at 0x7da2044c2f50>]]] | keyword[def] identifier[split_fixed_pattern] ( identifier[path] ):
literal[string]
identifier[_first_pattern_pos] = identifier[path] . identifier[find] ( literal[string] )
identifier[_path_separator_pos] = identifier[path] . identifier[rfind] ( literal[string] , literal[int] , identifier[_first_pattern_pos] )+ literal[int]
identifier[_path_fixed] = identifier[path] [: identifier[_path_separator_pos] ]
identifier[_path_pattern] = identifier[path] [ identifier[_path_separator_pos] :]
keyword[return] identifier[_path_fixed] , identifier[_path_pattern] | def split_fixed_pattern(path):
"""
Split path into fixed and masked parts
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc110/x86/win/boost.*.*.*.tar.gz
"""
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos) + 1
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos:]
return (_path_fixed, _path_pattern) |
def addIndividual(self, individual):
    """
    Register *individual* with this dataset, indexing it by ID and by name.
    """
    individualId = individual.getId()
    self._individualIdMap[individualId] = individual
    self._individualIds.append(individualId)
    self._individualNameMap[individual.getName()] = individual
constant[
Adds the specified individual to this dataset.
]
variable[id_] assign[=] call[name[individual].getId, parameter[]]
call[name[self]._individualIdMap][name[id_]] assign[=] name[individual]
call[name[self]._individualIds.append, parameter[name[id_]]]
call[name[self]._individualNameMap][call[name[individual].getName, parameter[]]] assign[=] name[individual] | keyword[def] identifier[addIndividual] ( identifier[self] , identifier[individual] ):
literal[string]
identifier[id_] = identifier[individual] . identifier[getId] ()
identifier[self] . identifier[_individualIdMap] [ identifier[id_] ]= identifier[individual]
identifier[self] . identifier[_individualIds] . identifier[append] ( identifier[id_] )
identifier[self] . identifier[_individualNameMap] [ identifier[individual] . identifier[getName] ()]= identifier[individual] | def addIndividual(self, individual):
"""
Adds the specified individual to this dataset.
"""
id_ = individual.getId()
self._individualIdMap[id_] = individual
self._individualIds.append(id_)
self._individualNameMap[individual.getName()] = individual |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.